Commit e4972c4 · committed by pakito312 · Parent(s): 6f5179d

update

Files changed:
- .env +5 -0
- Dockerfile +43 -0
- README.md +15 -10
- app.py +267 -0
- auto_deploy.sh +30 -0
- requirements.txt +5 -0
- start.sh +47 -0
.env
ADDED
@@ -0,0 +1,5 @@
+OLLAMA_HOST=0.0.0.0
+OLLAMA_KEEP_ALIVE=24h
+OLLAMA_NUM_PARALLEL=1
+HF_SPACE=true
+GRADIO_SERVER_PORT=7860
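
Nothing in the committed files sources this .env inside the container (the Dockerfile below repeats most of these values with ENV, and app.py never loads python-dotenv even though it is in requirements.txt). If the file itself should take effect at runtime, one option is to export it from a shell entrypoint; a minimal sketch, assuming the file sits next to the script and contains only KEY=VALUE lines:

```bash
# Sketch: auto-export every assignment read from .env, then restore the default.
set -a
. ./.env
set +a
```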
Dockerfile
ADDED
@@ -0,0 +1,43 @@
+# Dockerfile optimized for Hugging Face Spaces
+FROM python:3.10-slim
+
+# Environment variables
+ENV DEBIAN_FRONTEND=noninteractive \
+    OLLAMA_HOST=0.0.0.0 \
+    OLLAMA_KEEP_ALIVE=24h \
+    HF_SPACE=true
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    curl \
+    ca-certificates \
+    zstd \
+    gnupg \
+    wget \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Ollama
+RUN curl -fsSL https://ollama.ai/install.sh | sh
+
+# Create a non-root user for Hugging Face
+RUN useradd -m -u 1000 -s /bin/bash user
+USER user
+WORKDIR /home/user
+
+# Copy the application
+COPY --chown=user:user app.py .
+COPY --chown=user:user requirements.txt .
+
+# Install the Python dependencies
+RUN pip install --no-cache-dir --user -r requirements.txt
+
+# Expose the ports (Hugging Face uses 7860)
+EXPOSE 7860
+EXPOSE 11434
+
+# Startup script optimized for HF
+COPY --chown=user:user start.sh .
+RUN chmod +x start.sh
+
+CMD ["./start.sh"]
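
The image can be sanity-checked locally before pushing to the Space; a minimal sketch (the `deepseek-space` tag is illustrative, not part of the commit):

```bash
# Build the image and run it with the same ports the Space exposes.
docker build -t deepseek-space .
docker run --rm -p 7860:7860 -p 11434:11434 deepseek-space
# The Gradio UI should then be reachable at http://localhost:7860.
```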
README.md
CHANGED
@@ -1,10 +1,15 @@
+# 🤖 DeepSeek-Coder 1.3B on Hugging Face
+
+Local deployment of DeepSeek-Coder 1.3B with Ollama on Hugging Face Spaces.
+
+## 🚀 Quick deployment
+
+1. **Go to [Hugging Face Spaces](https://huggingface.co/spaces)**
+2. **Create a new Space** with these settings:
+   - Owner: your username
+   - Space name: `deepseek-coder`
+   - SDK: **Docker**
+   - License: MIT
+   - Visibility: Public or Private
+
+3. **Clone this repository** or download the files:
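
Since Spaces are plain git repositories, step 3 of the README might look like the following; a hedged sketch, with `<username>` as a placeholder:

```bash
# Sketch: clone the Space's git repo, add the files from this commit, push.
git clone https://huggingface.co/spaces/<username>/deepseek-coder
cd deepseek-coder
# copy .env, Dockerfile, app.py, requirements.txt and start.sh here, then:
git add . && git commit -m "Deploy DeepSeek-Coder" && git push
```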
app.py
ADDED
@@ -0,0 +1,267 @@
+import gradio as gr
+import requests
+import json
+import time
+from typing import AsyncGenerator
+import asyncio
+import aiohttp
+
+# Configuration
+OLLAMA_URL = "http://localhost:11434"
+MODEL_NAME = "deepseek-coder:1.3b"
+CUSTOM_MODEL = "deepseek-coder-custom"
+
+class OllamaClient:
+    def __init__(self):
+        self.base_url = OLLAMA_URL
+
+    def check_health(self) -> bool:
+        """Check whether Ollama is up"""
+        try:
+            response = requests.get(f"{self.base_url}/api/tags", timeout=5)
+            return response.status_code == 200
+        except requests.RequestException:
+            return False
+
+    def generate(self, prompt: str, temperature: float = 0.2, max_tokens: int = 1024) -> str:
+        """Generate text"""
+        try:
+            payload = {
+                "model": CUSTOM_MODEL,
+                "prompt": prompt,
+                "stream": False,
+                "options": {
+                    "temperature": temperature,
+                    "num_predict": max_tokens,
+                    "top_p": 0.95,
+                    "repeat_penalty": 1.1
+                }
+            }
+
+            response = requests.post(
+                f"{self.base_url}/api/generate",
+                json=payload,
+                timeout=120
+            )
+
+            if response.status_code == 200:
+                return response.json()["response"]
+            else:
+                return f"Error: {response.status_code} - {response.text}"
+
+        except Exception as e:
+            return f"Connection error: {str(e)}"
+
+    async def generate_stream(self, prompt: str, temperature: float = 0.2) -> AsyncGenerator[str, None]:
+        """Generate with streaming"""
+        payload = {
+            "model": CUSTOM_MODEL,
+            "prompt": prompt,
+            "stream": True,
+            "options": {
+                "temperature": temperature,
+                "num_predict": 1024
+            }
+        }
+
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                f"{self.base_url}/api/generate",
+                json=payload,
+                timeout=60
+            ) as response:
+                async for line in response.content:
+                    if line:
+                        try:
+                            data = json.loads(line.decode('utf-8'))
+                            yield data.get("response", "")
+                        except json.JSONDecodeError:
+                            continue
+
+# Initialize the client
+client = OllamaClient()
+
+# Gradio helper functions
+def generate_code(prompt, temperature, language):
+    """Generate code"""
+    if not prompt.strip():
+        return "❌ Please enter a prompt"
+
+    # Add the language context
+    enhanced_prompt = prompt
+    if language != "auto":
+        enhanced_prompt = f"Write in {language}:\n{prompt}"
+
+    # Check Ollama's health
+    if not client.check_health():
+        return "⚠️ Ollama is not available. Please wait..."
+
+    # Generate the answer
+    result = client.generate(enhanced_prompt, temperature)
+    return result
+
+async def generate_stream_ui(prompt, temperature):
+    """Streaming interface"""
+    if not prompt.strip():
+        yield "❌ Please enter a prompt"
+        return
+
+    full_response = ""
+    async for chunk in client.generate_stream(prompt, temperature):
+        full_response += chunk
+        yield full_response
+
+def get_model_info():
+    """Get model information"""
+    try:
+        response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=5)
+        if response.status_code == 200:
+            models = response.json().get("models", [])
+            return "\n".join([f"📦 {m['name']} ({m.get('size', 'N/A')})" for m in models])
+        return "⚠️ Could not retrieve the information"
+    except requests.RequestException:
+        return "❌ Connection error"
+
+# Gradio interface
+with gr.Blocks(
+    theme=gr.themes.Soft(),
+    title="🤖 DeepSeek-Coder 1.3B - Hugging Face",
+    css=".gradio-container {max-width: 900px !important}"
+) as demo:
+
+    gr.Markdown("""
+    # 🚀 DeepSeek-Coder 1.3B
+    ### Local AI code generator on Hugging Face Spaces
+
+    This model runs locally with **Ollama**. It generates code in several programming languages.
+    """)
+
+    with gr.Row():
+        with gr.Column(scale=2):
+            # Input area
+            prompt = gr.Textbox(
+                label="📝 Prompt",
+                placeholder="Write a Python function that computes the factorial...",
+                lines=6,
+                max_lines=10
+            )
+
+            with gr.Row():
+                temperature = gr.Slider(
+                    label="🌡️ Temperature",
+                    minimum=0.1,
+                    maximum=1.0,
+                    value=0.2,
+                    step=0.1
+                )
+
+                language = gr.Dropdown(
+                    label="💻 Language",
+                    choices=[
+                        "auto", "python", "javascript", "java", "cpp",
+                        "go", "rust", "html", "css", "sql", "bash"
+                    ],
+                    value="auto"
+                )
+
+            with gr.Row():
+                generate_btn = gr.Button("✨ Generate", variant="primary")
+                stream_btn = gr.Button("⚡ Generate with streaming")
+                clear_btn = gr.Button("🧹 Clear")
+
+        with gr.Column(scale=3):
+            # Output area
+            output = gr.Code(
+                label="📄 Generated code",
+                language="python",
+                lines=15,
+                interactive=False
+            )
+
+            # Streaming area
+            stream_output = gr.Textbox(
+                label="⚡ Streaming",
+                visible=False,
+                lines=15
+            )
+
+    # Information section
+    with gr.Accordion("ℹ️ System information", open=False):
+        with gr.Row():
+            model_info = gr.Textbox(
+                label="Available models",
+                value="Loading...",
+                interactive=False
+            )
+            health_status = gr.Textbox(
+                label="Ollama status",
+                value="Checking...",
+                interactive=False
+            )
+
+        refresh_btn = gr.Button("🔄 Refresh")
+
+    # Examples section
+    with gr.Accordion("📚 Examples", open=True):
+        examples = gr.Examples(
+            examples=[
+                ["Write a function that reverses a string", "python", 0.2],
+                ["Create a React component for a button", "javascript", 0.2],
+                ["Implement a linked list in C++", "cpp", 0.2],
+                ["Write a SQL query to find active users", "sql", 0.2],
+                ["Bash script to back up a MySQL database", "bash", 0.2]
+            ],
+            inputs=[prompt, language, temperature],
+            outputs=output
+        )
+
+    # Events
+    generate_btn.click(
+        fn=generate_code,
+        inputs=[prompt, temperature, language],
+        outputs=output
+    )
+
+    stream_btn.click(
+        fn=lambda: gr.update(visible=False),
+        outputs=output
+    ).then(
+        fn=lambda: gr.update(visible=True),
+        outputs=stream_output
+    ).then(
+        fn=generate_stream_ui,
+        inputs=[prompt, temperature],
+        outputs=stream_output
+    )
+
+    clear_btn.click(
+        fn=lambda: ("", "", ""),
+        outputs=[prompt, output, stream_output]
+    )
+
+    refresh_btn.click(
+        fn=get_model_info,
+        outputs=model_info
+    ).then(
+        fn=lambda: "✅ Online" if client.check_health() else "❌ Offline",
+        outputs=health_status
+    )
+
+    # Initialization
+    demo.load(
+        fn=get_model_info,
+        outputs=model_info
+    ).then(
+        fn=lambda: "✅ Online" if client.check_health() else "❌ Offline",
+        outputs=health_status
+    )
+
+if __name__ == "__main__":
+    # Start Gradio
+    demo.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        share=False,
+        debug=False,
+        show_error=True
+    )
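
app.py wraps Ollama's HTTP API; when debugging the Space, the same endpoint can be exercised directly from a shell with the payload shape the client sends:

```bash
# Non-streaming generation request against the local Ollama server.
curl -s http://localhost:11434/api/generate \
  -d '{"model": "deepseek-coder-custom", "prompt": "Write hello world in Python", "stream": false}'
```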
auto_deploy.sh
ADDED
@@ -0,0 +1,30 @@
+# auto_deploy.sh
+echo "Choose your option:"
+echo "1) Google Colab (free, GPU)"
+echo "2) Hugging Face Spaces (free, CPU)"
+echo "3) VPS (paid, ~6€/month)"
+echo "4) RunPod (pay per use)"
+
+read -p "Your choice: " choice
+
+case $choice in
+    1)
+        echo "Open: https://colab.research.google.com"
+        echo "Copy the provided Colab notebook"
+        ;;
+    2)
+        echo "1. Go to huggingface.co/spaces"
+        echo "2. New Space -> Docker"
+        echo "3. Upload the Dockerfile and app.py files"
+        ;;
+    3)
+        echo "Commands for an Ubuntu VPS:"
+        echo "wget https://raw.githubusercontent.com/ollama/ollama/main/install.sh && sh install.sh"
+        echo "ollama pull deepseek-coder:1.3b"
+        ;;
+    4)
+        echo "1. Create a RunPod.io account"
+        echo "2. Deploy -> Serverless"
+        echo "3. Upload the Dockerfile"
+        ;;
+esac
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio==4.19.1
+requests==2.31.0
+aiohttp==3.9.1
+websockets==12.0
+python-dotenv==1.0.0
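
For running app.py outside Docker against a locally installed Ollama, the usual virtualenv flow applies; a sketch:

```bash
python -m venv .venv && . .venv/bin/activate
pip install -r requirements.txt
python app.py   # expects an Ollama server listening on localhost:11434
```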
start.sh
ADDED
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+echo "🚀 Starting the application on Hugging Face Spaces..."
+
+# Start Ollama in the background
+echo "🔧 Starting the Ollama server..."
+ollama serve &
+
+# Wait for the server to be ready
+echo "⏳ Waiting for the server to start..."
+sleep 20
+
+# Check whether Ollama responds
+if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then
+    echo "✅ Ollama is running"
+else
+    echo "❌ Ollama is not responding, restarting..."
+    pkill ollama
+    sleep 5
+    ollama serve &
+    sleep 20
+fi
+
+# Download the DeepSeek-Coder model
+echo "📥 Downloading the DeepSeek-Coder 1.3B model..."
+echo "⚠️ This can take several minutes (1.4GB)..."
+ollama pull deepseek-coder:1.3b
+
+# Create a custom Modelfile
+echo "⚙️ Configuring the model..."
+cat > /home/user/Modelfile << 'EOF'
+FROM deepseek-coder:1.3b
+
+# Parameters tuned for code
+PARAMETER temperature 0.2
+PARAMETER top_p 0.95
+PARAMETER top_k 40
+PARAMETER num_predict 1024
+PARAMETER repeat_penalty 1.1
+EOF
+
+# Create the custom model
+ollama create deepseek-coder-custom -f /home/user/Modelfile
+
+# Launch the Gradio app
+echo "🌐 Starting the web interface..."
+python app.py
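
The fixed `sleep 20` can be both too long (fast startup) and too short (slow cold start); one alternative, not part of the committed script, is to poll the API until it answers:

```bash
# Sketch: wait up to ~60 s for Ollama instead of sleeping a fixed 20 s.
for i in $(seq 1 30); do
    curl -s http://localhost:11434/api/tags > /dev/null 2>&1 && break
    sleep 2
done
```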