Update Dockerfile

Dockerfile  CHANGED  +24 -26

@@ -1,34 +1,25 @@
-# Use a lightweight Python base
+# Use a lightweight Python base (Debian-based)
 FROM python:3.10-slim
 
 # Set working directory
 WORKDIR /app
 
-# 1. Install
-#
-# llama-cpp-python can compile from source if it can't find a wheel.
+# 1. Install System Dependencies
+# 'cmake' and 'build-essential' are strictly required if wheel installation fails.
 RUN apt-get update && apt-get install -y \
     git \
     curl \
     build-essential \
     cmake \
-
-    pkg-config \
-    gcc \
-    g++ \
+    libopenblas-dev \
     && rm -rf /var/lib/apt/lists/*
 
-# 2.
-RUN pip install --upgrade pip setuptools wheel
-
-# 3. Download Retro Font (VT323)
+# 2. Download Retro Font (VT323)
 RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
 
-#
-# We
-
-ENV CMAKE_ARGS="-DLLAMA_NATIVE=OFF -DGGML_NATIVE=OFF"
-RUN pip install --no-cache-dir -v \
+# 3. Install Python Dependencies
+# We split the installation to ensure core libs are present before llama-cpp-python attempts to load.
+RUN pip install --no-cache-dir \
     torch \
     torchvision \
     numpy \
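
The VT323.ttf fetched in step 2 is presumably consumed by PIL inside app.py (which imports Image and ImageDraw). A minimal sketch of that usage, assuming the /app/VT323.ttf path from the curl step; the font size, canvas, and text are illustrative, not taken from app.py:

from PIL import Image, ImageDraw, ImageFont

# Load the font that the Dockerfile downloads to /app/VT323.ttf
font = ImageFont.truetype("/app/VT323.ttf", 24)

# Render a green-on-black retro banner
img = Image.new("RGB", (320, 64), "black")
draw = ImageDraw.Draw(img)
draw.text((8, 16), "RETRO TERMINAL", font=font, fill="#33ff33")
img.save("banner.png")
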
@@ -43,19 +34,21 @@ RUN pip install --no-cache-dir -v \
     safetensors \
     scipy
 
-#
-#
-
-
-
+# 4. Install Llama-CPP-Python (Corrected)
+# REMOVED: The '--prefer-binary' flag pointing to the wrong index.
+# CHANGED: We now let pip build from source using the installed CMake/GCC.
+# This guarantees compatibility with the Debian container (glibc).
+# We set CMAKE_ARGS to ensure it builds a CPU-only version (no CUDA req).
+ENV CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_NATIVE=OFF"
+RUN pip install --no-cache-dir llama-cpp-python
 
-#
+# 5. Create a non-root user
 RUN useradd -m -u 1000 user
 USER user
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:$PATH
 
-#
+# 6. Write the Monolith Application to disk
 COPY --chown=user <<'EOF' app.py
 import sys, os, io, base64, json, pickle, time
 import numpy as np
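
With these CMAKE_ARGS, pip compiles llama.cpp from source with a BLAS backend (hence the libopenblas-dev package added in step 1) and with native-CPU tuning disabled, so the resulting binary runs on generic x86-64 hosts. A hypothetical usage sketch of the CPU-only build; the model path and parameters below are assumptions, not taken from this Dockerfile or app.py:

from llama_cpp import Llama

llm = Llama(
    model_path="/app/models/model.gguf",  # hypothetical path; no model is baked into this image
    n_ctx=2048,      # context window
    n_threads=4,     # CPU threads; this build does no GPU offload
)

out = llm("Q: Why compile llama-cpp-python from source? A:", max_tokens=64)
print(out["choices"][0]["text"])
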
@@ -67,7 +60,12 @@ from flask import Flask, request, send_file, render_template_string
 from flask_sock import Sock
 from diffusers import StableDiffusionPipeline, AutoencoderTiny, LCMScheduler
 from PIL import Image, ImageDraw
-from llama_cpp import Llama
+# Wrap Llama import to prevent crash if model not found
+try:
+    from llama_cpp import Llama
+except ImportError:
+    Llama = None
+    print("[!] Warning: llama-cpp-python not loaded correctly.")
 
 # ============================================================================
 # 1. FRONTEND ASSET
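
Because the guard above leaves Llama as None when the import fails, any route that touches the model should check for that before use. A minimal sketch of such a check; the route name, app object, and llm handle are illustrative, and the real app.py may structure this differently:

from flask import Flask, jsonify

app = Flask(__name__)
llm = None  # would hold a Llama instance once the guarded import succeeds

@app.route("/generate")
def generate():
    # Degrade gracefully instead of crashing the worker
    if llm is None:
        return jsonify(error="LLM backend unavailable"), 503
    result = llm("Hello", max_tokens=32)
    return jsonify(text=result["choices"][0]["text"])
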
@@ -508,6 +506,6 @@ if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860, threaded=True)
 EOF
 
-#
+# 7. Launch the Monolith
 EXPOSE 7860
 CMD ["python", "app.py"]
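
With the app written to disk, the container serves Flask on port 7860 under the non-root user. A hypothetical smoke test, assuming the image was started with docker run -p 7860:7860; the expected status is an assumption about app.py's root route:

import requests

# Assumes the container is up and port 7860 is published to the host
resp = requests.get("http://localhost:7860/", timeout=30)
print(resp.status_code)  # expect 200 once the Flask app has finished loading models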