Hugging Face Space (status: Sleeping)
Commit: Update Dockerfile
Files changed: Dockerfile (+22 −11)
Dockerfile
CHANGED
|
@@ -1,26 +1,37 @@
|
|
| 1 |
FROM python:3.11-slim
|
| 2 |
|
| 3 |
-
# Instala
|
| 4 |
RUN apt-get update && \
|
| 5 |
apt-get install -y --no-install-recommends \
|
| 6 |
xz-utils && \
|
| 7 |
pip install --no-cache-dir huggingface_hub && \
|
| 8 |
rm -rf /var/lib/apt/lists/*
|
| 9 |
|
| 10 |
-
#
|
| 11 |
-
|
| 12 |
-
WORKDIR /
|
|
|
|
| 13 |
|
| 14 |
-
# Script de arranque
|
| 15 |
RUN echo '#!/usr/bin/env python3' > /run.py && \
|
| 16 |
echo 'import os' >> /run.py && \
|
| 17 |
echo 'from huggingface_hub import hf_hub_download' >> /run.py && \
|
| 18 |
-
echo 'print("
|
| 19 |
-
echo '
|
| 20 |
-
echo '
|
| 21 |
-
echo '
|
| 22 |
-
echo '
|
| 23 |
-
echo '
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
echo 'os.system("python3 -m http.server 7860")' >> /run.py
|
| 25 |
|
| 26 |
EXPOSE 7860
|
|
|
|
# syntax=docker/dockerfile:1
FROM python:3.11-slim

# Instala herramientas necesarias:
#  - xz-utils: needed at runtime for the `xz -9e -T0` compression step below
#  - huggingface_hub: used by /run.py to download the GGUF model
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        xz-utils && \
    pip install --no-cache-dir huggingface_hub && \
    rm -rf /var/lib/apt/lists/*

# Variables de entorno — keep the HF cache inside the workspace so downloads
# and cache share one writable tree.
ENV HF_HOME=/workspace/.hf

WORKDIR /workspace

# /workspace/.hf: huggingface_hub cache; /data: download + archive target.
# Run as a non-root user (uid 1000, the conventional Spaces uid); the app
# only needs write access to these two trees.
RUN mkdir -p /workspace/.hf /data && \
    useradd --create-home --uid 1000 appuser && \
    chown -R appuser:appuser /workspace /data

# Script de arranque con logging detallado.
# Written as a heredoc instead of a chain of `echo '…' >> /run.py` commands:
# no quote-escaping, readable diff, and the file content is exactly what you
# see here. The <<'EOF' (quoted) form prevents any shell expansion.
COPY <<'EOF' /run.py
#!/usr/bin/env python3
import os
from huggingface_hub import hf_hub_download

print("===== BitNet GGUF Downloader =====")
print("[1/3] Descargando modelo desde Hugging Face...")
path = hf_hub_download(
    repo_id="microsoft/bitnet-b1.58-2B-4T-gguf",
    filename="ggml-model-i2_s.gguf",
    local_dir="/data",
    cache_dir="/workspace/.hf"
)
print(f"Modelo descargado en: {path}")
print("[2/3] Comprimimos con tar.xz (modo extremo)...")
os.system('tar -C /data -c -f /data/model.tar.xz -I "xz -9e -T0" ggml-model-i2_s.gguf')
# Fixed mojibake: the original scrape showed "Tama帽os" (mis-decoded "ñ").
print("[3/3] Tamaños:")
os.system("du -h /data/ggml-model-i2_s.gguf")
os.system("du -h /data/model.tar.xz")
print("Servidor HTTP activo en puerto 7860")
os.chdir("/data")
os.system("python3 -m http.server 7860")
EOF

USER appuser

# Documentation only — Spaces routes traffic to this port.
EXPOSE 7860

# Without a CMD the image built but never ran /run.py. Exec form so the
# process is PID 1 and receives SIGTERM on `docker stop`.
CMD ["python3", "/run.py"]