Update Dockerfile
Dockerfile  CHANGED  +21 -18
@@ -1,11 +1,11 @@
-# Use a lightweight Python base
+# Use a lightweight Python base
 FROM python:3.10-slim
 
 # Set working directory
 WORKDIR /app
 
-# 1. Install
-#
+# 1. Install Build Tools
+# strictly required for the fallback compilation
 RUN apt-get update && apt-get install -y \
     git \
     curl \
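Why this hunk matters: if pip cannot find a matching binary wheel, it falls back to compiling llama-cpp-python from source, and that compile needs git, a C/C++ toolchain, and the OpenBLAS headers installed here. A quick sanity check along these lines can confirm the toolchain inside the image (a sketch only; the exact tool list is an assumption, since the middle of the apt-get block is elided in this diff):

    # Hypothetical sanity check, run inside the built image.
    # Tool names are assumptions; the diff elides part of the apt-get list.
    import shutil
    import ctypes.util

    for tool in ("git", "curl", "cmake", "gcc"):
        print(f"{tool}: {shutil.which(tool)}")  # prints a path, or None if missing
    print("openblas:", ctypes.util.find_library("openblas"))  # e.g. 'libopenblas.so.0'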
@@ -14,11 +14,15 @@ RUN apt-get update && apt-get install -y \
     libopenblas-dev \
     && rm -rf /var/lib/apt/lists/*
 
-# 2.
+# 2. CRITICAL: Upgrade pip
+# Old pip versions (default in 3.10-slim) often fail to find binary wheels,
+# forcing a slow source build. Upgrading fixes this.
+RUN pip install --upgrade pip setuptools wheel
+
+# 3. Download Retro Font (VT323)
 RUN curl -L -o /app/VT323.ttf https://github.com/google/fonts/raw/main/ofl/vt323/VT323-Regular.ttf
 
-#
-# We split the installation to ensure core libs are present before llama-cpp-python attempts to load.
+# 4. Install Python Dependencies (Split for caching)
 RUN pip install --no-cache-dir \
     torch \
     torchvision \
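The pip upgrade works because wheel selection is driven by platform tags: pip installs a wheel only if its tag appears in the interpreter's supported-tag list, and old pip releases do not know the newer manylinux tags that large packages such as torch publish under, so they silently fall back to a slow source build. A minimal sketch using the packaging library (an extra dependency, not part of this Dockerfile) lists the tags an interpreter accepts:

    # Sketch only; requires `pip install packaging`.
    from packaging.tags import sys_tags

    # Most-preferred tags first; a wheel installs only if its tag is in this list.
    for tag in list(sys_tags())[:5]:
        print(tag)  # e.g. cp310-cp310-manylinux_2_17_x86_64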
@@ -34,21 +38,19 @@ RUN pip install --no-cache-dir \
     safetensors \
     scipy
 
-#
-#
-#
-
-
-ENV CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_NATIVE=OFF"
-RUN pip install --no-cache-dir llama-cpp-python
+# 5. Install Llama-CPP-Python (The Fix)
+# - CMAKE_ARGS="-DLLAMA_NATIVE=OFF": Prevents hanging on architecture detection.
+# - -v: Verbose mode so you can see the build scrolling instead of freezing.
+ENV CMAKE_ARGS="-DLLAMA_NATIVE=OFF -DLLAMA_BLAS=ON -DGGML_NATIVE=OFF"
+RUN pip install --no-cache-dir -v llama-cpp-python
 
-#
+# 6. Create a non-root user
 RUN useradd -m -u 1000 user
 USER user
 ENV HOME=/home/user \
     PATH=/home/user/.local/bin:$PATH
 
-#
+# 7. Write the Monolith Application to disk
 COPY --chown=user <<'EOF' app.py
 import sys, os, io, base64, json, pickle, time
 import numpy as np
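Once this step succeeds, a short smoke test confirms the compiled extension loads and generates. The GGUF path below is a placeholder (the image ships no model); Llama(model_path=...) and calling the resulting object directly are llama-cpp-python's standard entry points:

    # Hedged smoke test; substitute a real GGUF file for the placeholder path.
    from llama_cpp import Llama

    llm = Llama(model_path="/models/tiny.gguf", n_ctx=512)
    out = llm("Q: What is 2+2? A:", max_tokens=8)
    print(out["choices"][0]["text"])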
@@ -60,12 +62,13 @@ from flask import Flask, request, send_file, render_template_string
 from flask_sock import Sock
 from diffusers import StableDiffusionPipeline, AutoencoderTiny, LCMScheduler
 from PIL import Image, ImageDraw
-
+
+# Graceful degradation if library fails (prevents crash loop)
 try:
     from llama_cpp import Llama
 except ImportError:
     Llama = None
-    print("[!]
+    print("[!] CRITICAL: Llama-cpp-python failed to import. AI features will be limited.")
 
 # ============================================================================
 # 1. FRONTEND ASSET
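The try/except only helps if downstream code checks the Llama sentinel before using it. The routes of app.py are elided from this diff, so the handler below is an illustrative sketch of the degradation path, not the app's real code:

    # Illustrative sketch; route name, model path, and response shape are assumptions.
    from flask import Flask, jsonify

    try:
        from llama_cpp import Llama
    except ImportError:
        Llama = None

    app = Flask(__name__)
    llm = Llama(model_path="/models/tiny.gguf") if Llama is not None else None

    @app.route("/ask")
    def ask():
        if llm is None:
            return jsonify(error="LLM unavailable"), 503  # degrade instead of crash-looping
        return jsonify(text=llm("Hello", max_tokens=8)["choices"][0]["text"])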
@@ -506,6 +509,6 @@ if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860, threaded=True)
 EOF
 
-#
+# 8. Launch the Monolith
 EXPOSE 7860
 CMD ["python", "app.py"]
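Two notes on the tail of the file: the heredoc form COPY <<'EOF' app.py requires BuildKit with a recent Dockerfile frontend (standard in current Docker releases, and evidently available where this Space builds), and EXPOSE 7860 matches the port Flask binds in app.run. Assuming the image is built and started as, say, docker build -t monolith . && docker run -p 7860:7860 monolith (image name hypothetical), a minimal probe looks like:

    # Assumes the container is running with port 7860 published on localhost.
    import urllib.request

    with urllib.request.urlopen("http://localhost:7860/", timeout=10) as resp:
        print(resp.status)  # expect 200 once the app has finished loading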