Commit 5143de5 ("update"), committed by pakito312
Parent: ca1c16e

Files changed:
- Dockerfile +5 -12
- api.py +23 -11
- download_model.py +13 -3
- requirements.txt +3 -2
Dockerfile CHANGED

@@ -8,14 +8,10 @@ RUN apt-get update && apt-get install -y \
     curl \
     && rm -rf /var/lib/apt/lists/*

-# Install …
-
-RUN pip install --no-cache-dir \
-    fastapi \
-    uvicorn \
-    pydantic \
-    requests \
-    huggingface-hub
+# Install Python dependencies
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+RUN pip install --no-cache-dir huggingface-hub

 # Create a non-root user
 RUN useradd -m -u 1000 user
@@ -26,10 +22,7 @@ WORKDIR /home/user
 COPY --chown=user:user api.py .
 COPY --chown=user:user download_model.py .

-# Download the GGUF model at build time (optional)
-# RUN python download_model.py
-
 EXPOSE 7860

 # Start
-CMD ["uvicorn", "…
+CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
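Copying requirements.txt before running pip install -r restores Docker layer caching: rebuilds only reinstall packages when requirements.txt itself changes, not on every source edit. Below is a minimal smoke test for the resulting image, assuming it was built and started locally with docker build -t allma . and docker run -p 7860:7860 allma; the image tag and localhost URL are illustrative assumptions, not part of the commit.

    # Minimal smoke test (stdlib only), assuming the container runs locally.
    import urllib.request

    BASE_URL = "http://localhost:7860"  # assumption: local run; the Space URL differs

    def smoke_test() -> None:
        # api.py exposes interactive docs at /docs (docs_url="/docs" in the diff)
        with urllib.request.urlopen(f"{BASE_URL}/docs", timeout=10) as resp:
            assert resp.status == 200, f"unexpected HTTP status: {resp.status}"
        print("✅ API container is up")

    if __name__ == "__main__":
        smoke_test()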
api.py CHANGED

@@ -11,17 +11,23 @@ from contextlib import asynccontextmanager
 from fastapi import FastAPI, HTTPException
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel, Field
-from huggingface_hub import hf_hub_download

 # Import llama_cpp
 try:
     from llama_cpp import Llama
-    from llama_cpp.server.app import create_app, Settings
 except ImportError:
     # Fallback if llama_cpp_python is not installed
     Llama = None

 # ========== CONFIGURATION ==========
+# IMPORTANT: huggingface_hub must be imported AFTER the checks
+# because it can cause import conflicts
+try:
+    from huggingface_hub import hf_hub_download
+    HF_AVAILABLE = True
+except ImportError:
+    HF_AVAILABLE = False
+
 MODEL_REPO = "bartowski/DeepSeek-Coder-1.3B-Instruct-GGUF"
 MODEL_FILES = [
     "DeepSeek-Coder-1.3B-Instruct-Q4_K_M.gguf",  # 900 MB - good compromise
@@ -42,7 +48,7 @@ class GenerateRequest(BaseModel):
     stream: bool = False

 class ChatMessage(BaseModel):
-    role: str = Field(..., …
+    role: str = Field(..., pattern="^(user|assistant|system)$")
     content: str

 class ChatRequest(BaseModel):
@@ -60,6 +66,9 @@ class ModelManager:

     def find_or_download_model(self):
         """Find or download the GGUF model"""
+        if not HF_AVAILABLE:
+            raise Exception("huggingface-hub is not installed")
+
         # Check whether a model already exists
         for model_file in MODEL_FILES:
             local_path = os.path.join(MODEL_DIR, model_file)
@@ -91,6 +100,9 @@ class ModelManager:
         """Load the model with llama_cpp"""
         if self.llm is not None:
             return self.llm
+
+        if Llama is None:
+            raise Exception("llama_cpp is not installed")

         print("🔧 Loading the model...")
         self.loading = True
@@ -100,9 +112,9 @@ class ModelManager:
         self.model_path = self.find_or_download_model()

         # Configure the model (tuned for Hugging Face's 16 GB of RAM)
-        n_gpu_layers = …
-        n_threads = …
-        n_ctx = …
+        n_gpu_layers = 0  # no GPU on the free Hugging Face Spaces tier
+        n_threads = 2  # 2 CPU threads (conservative)
+        n_ctx = 1024  # limited context to save RAM

         print(f"🔄 Loading from: {self.model_path}")
         print(f"⚙️ Configuration: GPU layers={n_gpu_layers}, Threads={n_threads}, Context={n_ctx}")
@@ -215,7 +227,7 @@ app = FastAPI(
     description="Ultra-fast API with llama_cpp_python",
     version="2.0.0",
     docs_url="/docs",
-    redoc_url=…
+    redoc_url=None,
     lifespan=lifespan
 )

@@ -310,11 +322,11 @@ async def chat(request: ChatRequest):
 async def list_models():
     """List the available models"""
     models = []
-    if model_manager.model_path:
+    if model_manager.model_path and os.path.exists(model_manager.model_path):
         models.append({
             "name": "deepseek-coder-1.3b",
             "path": model_manager.model_path,
-            "size_mb": os.path.getsize(model_manager.model_path) / 1024 / 1024
+            "size_mb": round(os.path.getsize(model_manager.model_path) / 1024 / 1024, 2),
             "loaded": model_manager.llm is not None
         })

@@ -326,11 +338,11 @@ async def demo():
     examples = [
         {
             "endpoint": "POST /generate",
-            "curl": 'curl -X POST https://…
+            "curl": 'curl -X POST https://digitaldev2024-allma.hf.space/generate -H "Content-Type: application/json" -d \'{"prompt": "def fibonacci(n):", "temperature": 0.2}\''
         },
         {
             "endpoint": "POST /chat",
-            "curl": 'curl -X POST https://…
+            "curl": 'curl -X POST https://digitaldev2024-allma.hf.space/chat -H "Content-Type: application/json" -d \'{"messages": [{"role": "user", "content": "Write Python code for binary search"}], "temperature": 0.2}\''
         }
     ]
     return {"examples": examples}
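The core of this change is a guarded-import pattern: each optional dependency is probed once at import time and re-checked at the call site, so the server process can always start and report an actionable error instead of dying with a NameError. Here is a condensed sketch of the pattern as it appears in the diff; demo_load() is a hypothetical helper for illustration, and the Llama settings mirror the CPU-only values chosen above.

    # Guarded imports: probe once, check again where the dependency is used.
    try:
        from llama_cpp import Llama
    except ImportError:
        Llama = None  # fallback when llama-cpp-python is not installed

    try:
        from huggingface_hub import hf_hub_download
        HF_AVAILABLE = True
    except ImportError:
        HF_AVAILABLE = False

    def demo_load(model_path: str):
        # Fail fast with a clear message (the diff raises plain Exception here).
        if Llama is None:
            raise RuntimeError("llama-cpp-python is not installed")
        if not HF_AVAILABLE:
            raise RuntimeError("huggingface-hub is not installed")
        # Settings mirror the diff: CPU only, 2 threads, 1024-token context.
        return Llama(model_path=model_path, n_gpu_layers=0, n_threads=2, n_ctx=1024)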
download_model.py CHANGED

@@ -1,9 +1,15 @@
 """
 Download the DeepSeek-Coder model in GGUF format
 """
-from huggingface_hub import hf_hub_download
 import os

+try:
+    from huggingface_hub import hf_hub_download
+    HF_AVAILABLE = True
+except ImportError:
+    HF_AVAILABLE = False
+    print("❌ huggingface-hub is not installed")
+
 # Configuration
 MODEL_REPO = "bartowski/DeepSeek-Coder-1.3B-Instruct-GGUF"
 MODEL_FILE = "DeepSeek-Coder-1.3B-Instruct-Q4_K_M.gguf"
@@ -11,6 +17,10 @@ LOCAL_PATH = "./models"

 def download_model():
     """Download the GGUF model"""
+    if not HF_AVAILABLE:
+        print("❌ Cannot download: huggingface-hub is not available")
+        return None
+
     os.makedirs(LOCAL_PATH, exist_ok=True)

     print(f"📥 Downloading {MODEL_FILE}...")
@@ -43,8 +53,8 @@ def download_model():
         )
         print(f"✅ Fallback model downloaded")
         return model_path
-    except:
-        print("❌ Could not download any model")
+    except Exception as e2:
+        print(f"❌ Could not download any model: {e2}")
         return None

 if __name__ == "__main__":
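download_model() now degrades gracefully: it bails out early when huggingface-hub is missing, tries the primary quantization, and falls back to a second file while logging the caught exception instead of swallowing it with a bare except. A condensed sketch of that flow follows; FALLBACK_FILE is a hypothetical name, since the actual fallback filename sits outside the visible hunks.

    # Condensed download-with-fallback flow from download_model.py.
    import os
    from huggingface_hub import hf_hub_download

    MODEL_REPO = "bartowski/DeepSeek-Coder-1.3B-Instruct-GGUF"
    MODEL_FILE = "DeepSeek-Coder-1.3B-Instruct-Q4_K_M.gguf"
    FALLBACK_FILE = "DeepSeek-Coder-1.3B-Instruct-Q4_0.gguf"  # hypothetical fallback
    LOCAL_PATH = "./models"

    def download_with_fallback():
        os.makedirs(LOCAL_PATH, exist_ok=True)
        for filename in (MODEL_FILE, FALLBACK_FILE):
            try:
                # hf_hub_download returns the local path of the fetched file
                return hf_hub_download(repo_id=MODEL_REPO, filename=filename,
                                       local_dir=LOCAL_PATH)
            except Exception as e:
                print(f"❌ Download of {filename} failed: {e}")
        return None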
requirements.txt CHANGED

@@ -1,4 +1,5 @@
 fastapi
-uvicorn
+uvicorn[standard]==0.24.0
 llama-cpp-python==0.2.77
-pydantic
+pydantic==2.5.0
+huggingface-hub==0.20.3