Update Dockerfile

Dockerfile  CHANGED  (+23 -15)
@@ -4,24 +4,31 @@ FROM python:3.10-slim
 # Set working directory
 WORKDIR /app
 
-# 1. Install
-#
-#
+# 1. Install HEAVY Build Dependencies
+# We include 'cmake', 'ninja-build', and 'pkg-config' to ensure
+# llama-cpp-python can compile from source if it can't find a wheel.
 RUN apt-get update && apt-get install -y \
     git \
     curl \
     build-essential \
     cmake \
+    ninja-build \
+    pkg-config \
+    gcc \
+    g++ \
     && rm -rf /var/lib/apt/lists/*
 
-# 2.
-RUN
+# 2. Upgrade pip to the latest version (Crucial for modern wheels)
+RUN pip install --upgrade pip setuptools wheel
 
-# 3.
-
+# 3. Download Retro Font (VT323)
+RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
 
-#
-
+# 4. Install Dependencies - VERBOSE MODE
+# We set CMAKE_ARGS to simplify the build and use '-v' to show progress.
+# This prevents the "silent freeze" by showing you the compilation logs.
+ENV CMAKE_ARGS="-DLLAMA_NATIVE=OFF -DGGML_NATIVE=OFF"
+RUN pip install --no-cache-dir -v \
     torch \
     torchvision \
     numpy \
@@ -36,18 +43,19 @@ RUN pip install --no-cache-dir \
     safetensors \
     scipy
 
-#
-#
-RUN pip install llama-cpp-python \
+# 5. Install Llama-CPP separately with Force Logic
+# We try to force a binary. If it fails, it falls back to a verbose build.
+RUN pip install --no-cache-dir -v llama-cpp-python==0.2.90 \
+    --prefer-binary \
     --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu
 
-#
+# 6. Create a non-root user
 RUN useradd -m -u 1000 user
 USER user
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:$PATH
 
-#
+# 7. Write the Monolith Application to disk
 COPY --chown=user <<'EOF' app.py
 import sys, os, io, base64, json, pickle, time
 import numpy as np
@@ -500,6 +508,6 @@ if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860, threaded=True)
 EOF
 
-#
+# 8. Launch the Monolith
 EXPOSE 7860
 CMD ["python", "app.py"]
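Step 3 only downloads the VT323 font to /app; the diff does not show how app.py uses it. A typical use would be rendering text onto images with Pillow, so the sketch below is purely an assumption (both the use of Pillow and its presence in the image are not confirmed by this view):

# Illustrative only: assumes Pillow is installed and the font is used for image rendering.
from PIL import Image, ImageDraw, ImageFont

font = ImageFont.truetype("/app/VT323.ttf", 24)   # path matches the curl target in step 3
img = Image.new("RGB", (320, 48), "black")
ImageDraw.Draw(img).text((8, 8), "RETRO TERMINAL", font=font, fill="green")
img.save("banner.png")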
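Step 5 pins llama-cpp-python 0.2.90, prefers a prebuilt CPU wheel from the extra index, and only falls back to a verbose source build (using the CMAKE_ARGS set in step 4) if no wheel matches. A minimal import check inside the built image confirms the install and its version; the model path below is hypothetical and is not provided by this Dockerfile:

# Minimal smoke test for the llama-cpp-python install; run inside the built image.
from llama_cpp import Llama, __version__

print("llama-cpp-python", __version__)   # expected 0.2.90, matching the pinned version

# Loading a model requires a GGUF file; "/app/model.gguf" is a hypothetical path for illustration.
llm = Llama(model_path="/app/model.gguf", n_ctx=512, n_threads=2)
print(llm("Q: 2+2= A:", max_tokens=4)["choices"][0]["text"])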
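Only the first two lines and the last two lines of the heredoc app.py written in step 7 are visible in this diff; the several hundred lines in between are collapsed. For orientation, a skeleton consistent with those visible lines might look like the following; Flask is inferred from the app.run(..., threaded=True) call, and the /health route is purely illustrative:

import sys, os, io, base64, json, pickle, time   # first visible line of the heredoc
import numpy as np                               # second visible line
from flask import Flask, jsonify                 # Flask is an assumption based on app.run(..., threaded=True)

app = Flask(__name__)

@app.route("/health")                            # illustrative endpoint; the real routes are not shown in the diff
def health():
    return jsonify(status="ok", time=time.time())

if __name__ == '__main__':
    # Matches the visible tail of the heredoc and the EXPOSE 7860 / CMD in step 8.
    app.run(host='0.0.0.0', port=7860, threaded=True)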