OrbitMC committed on
Commit
7c3baf3
·
verified ·
1 Parent(s): fc357cb

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +24 -16
Dockerfile CHANGED
@@ -1,39 +1,47 @@
# Previous image: plain Python slim base; llama-cpp-python pulled as a
# prebuilt CPU wheel so the build never falls back to compiling from source.
FROM python:3.11-slim

# Unbuffered stdout/stderr so logs appear immediately in the HF console.
ENV PYTHONUNBUFFERED=1

# ffmpeg is the only system package the app needs (audio handling).
RUN apt-get update && apt-get install -y --no-install-recommends \
        ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# Hugging Face Spaces requires a non-root user with uid 1000.
RUN useradd -m -u 1000 user
USER user

ENV PATH="/home/user/.local/bin:$PATH"
ENV HF_HOME="/home/user/.cache/huggingface"

WORKDIR /home/user/app

# Step 1: standard (pure-Python / wheel-only) dependencies.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir \
        flask==3.1.1 \
        edge-tts \
        num2words==0.5.14 \
        "huggingface_hub>=0.27.0" \
        "numpy<2.0"

# Step 2: llama-cpp-python from the maintainer's CPU wheel index.
# --only-binary guarantees pip never attempts a from-source build
# (which would hang indefinitely on this base image).
RUN pip install --no-cache-dir llama-cpp-python \
        --only-binary llama-cpp-python \
        --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu

# Application source, owned by the runtime user.
COPY --chown=user:user . .

# HF Spaces expects the web app on port 7860 (documentation only).
EXPOSE 7860

CMD ["python", "app.py"]
 
 
 
 
 
 
# NOTE(review): the `:full` tag is mutable — pin a versioned tag or digest
# (e.g. ghcr.io/ggml-org/llama.cpp:full@sha256:…) for reproducible builds.
FROM ghcr.io/ggml-org/llama.cpp:full

# 1. Switch to root to install system dependencies.
USER root

# 2. The 'full' image is Ubuntu-based but lacks pip and ffmpeg. Install them.
RUN apt-get update && apt-get install -y --no-install-recommends \
        python3 \
        python3-pip \
        ffmpeg \
    && rm -rf /var/lib/apt/lists/*

# 3. Hugging Face Spaces REQUIRES running as a non-root user (uid 1000).
RUN useradd -m -u 1000 user
USER user

# 4. Runtime environment: user-local bin on PATH, HF cache in the user home.
ENV PATH="/home/user/.local/bin:$PATH"
ENV HF_HOME="/home/user/.cache/huggingface"
# CRITICAL: disables buffering so logs print instantly in the HF console.
ENV PYTHONUNBUFFERED=1

WORKDIR /home/user/app

# 5. Install Python dependencies.
# Because this is pure Ubuntu (glibc), pip correctly pulls the prebuilt
# CPU wheel for llama-cpp-python from the extra index instead of compiling.
# NOTE(review): if the base is Ubuntu 23.04+, system pip may refuse to
# install due to PEP 668 (externally-managed-environment); verify, and add
# --break-system-packages or a venv if the build fails there.
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
    python3 -m pip install --no-cache-dir \
        flask==3.1.1 \
        edge-tts \
        num2words==0.5.14 \
        "huggingface_hub>=0.27.0" \
        "numpy<2.0" \
        llama-cpp-python \
        --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu

# 6. Copy the application into the container, owned by the runtime user.
COPY --chown=user:user . .

# HF Spaces expects the web app on port 7860 (documentation only).
EXPOSE 7860

# 7. CRITICAL: clear the base image's entrypoint, otherwise it attempts to
# run `/app/llama-cli python3 app.py` and crashes.
# BUG FIX: `ENTRYPOINT[]` (no space) is not a valid Dockerfile instruction
# and fails the build — the instruction keyword must be followed by a space.
ENTRYPOINT []

# 8. Start the app.
CMD ["python3", "app.py"]