pakito312 committed · Commit 4456a38 · Parent(s): ad95241

update

Files changed:
- Dockerfile +5 -32
- app.py +52 -259
- requirements.txt +2 -2
Dockerfile
CHANGED
@@ -1,43 +1,16 @@
-# Dockerfile optimized for Hugging Face Spaces
 FROM python:3.10-slim

-# Environment variables
-ENV DEBIAN_FRONTEND=noninteractive \
-    OLLAMA_HOST=0.0.0.0 \
-    OLLAMA_KEEP_ALIVE=24h \
-    HF_SPACE=true
-
 # Install system dependencies
-RUN apt-get update && apt-get install -y \
-    curl \
-    ca-certificates \
-    zstd \
-    gnupg \
-    wget \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y curl zstd && rm -rf /var/lib/apt/lists/*

 # Install Ollama
 RUN curl -fsSL https://ollama.ai/install.sh | sh

-# Create a non-root user for Hugging Face
-RUN useradd -m -u 1000 -s /bin/bash user
-USER user
-WORKDIR /home/user
-
 # Copy the application
-COPY
-COPY --chown=user:user requirements.txt .
+COPY app.py .

-#
-RUN pip install --no-cache-dir --user -r requirements.txt
-
-# Expose the ports (Hugging Face uses 7860)
+# Expose the port
 EXPOSE 7860
-EXPOSE 11434
-
-# Startup script optimized for HF
-COPY --chown=user:user start.sh .
-RUN chmod +x start.sh
+
+# Start
+CMD ["sh", "-c", "ollama serve & sleep 30 && ollama pull deepseek-coder:1.3b && python app.py"]
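One caveat on the new CMD: it waits a fixed 30 seconds for `ollama serve` to come up, so a slow cold start can still race the `ollama pull`. A readiness poll against Ollama's HTTP API is usually sturdier. A minimal sketch, not part of this commit (it assumes Ollama's default address 127.0.0.1:11434; `wait_for_ollama` is a hypothetical helper):

import time
import urllib.request

def wait_for_ollama(url="http://127.0.0.1:11434/api/tags", timeout=120):
    """Poll Ollama's /api/tags endpoint until it answers, or give up after `timeout` seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return True  # server is up and can list models
        except OSError:
            pass  # connection refused: server not listening yet
        time.sleep(1)
    return False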
app.py
CHANGED
@@ -1,267 +1,60 @@
-
-
+#!/usr/bin/env python3
+"""
+Minimal HTTP server for DeepSeek-Coder
+"""
+import http.server
+import socketserver
 import json
+import os  # fix: needed by os.path.exists below, missing from the committed file
+import subprocess
+import threading
 import time
-from typing import Generator
-import asyncio
-import aiohttp

-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
+# Start Ollama
+def start_ollama():
+    subprocess.run(["ollama", "serve"])
+
+# Launch Ollama in a background thread
+threading.Thread(target=start_ollama, daemon=True).start()
+time.sleep(30)
+
+# Download the model
+print("Downloading the model...")
+subprocess.run(["ollama", "pull", "deepseek-coder:1.3b"], capture_output=True)
+
+# Simple HTML page
+HTML = open("index.html").read() if os.path.exists("index.html") else "<h1>DeepSeek-Coder 1.3B</h1>"
+
+class OllamaHandler(http.server.SimpleHTTPRequestHandler):
+    def do_GET(self):
+        if self.path == "/":
+            self.send_response(200)
+            self.send_header("Content-type", "text/html")
+            self.end_headers()
+            self.wfile.write(HTML.encode())
+        else:
+            super().do_GET()
+
+    def do_POST(self):
+        if self.path == "/generate":
+            content_length = int(self.headers['Content-Length'])
+            post_data = self.rfile.read(content_length)
+            data = json.loads(post_data)

-
-
-
-
+            # Call Ollama
+            result = subprocess.run(
+                ["ollama", "run", "deepseek-coder:1.3b", data.get("prompt", "")],
+                capture_output=True,
+                text=True
             )

-
-
-
-
-
-        except Exception as e:
-            return f"Connection error: {str(e)}"
-
-    async def generate_stream(self, prompt: str, temperature: float = 0.2) -> Generator[str, None, None]:
-        """Generate with streaming"""
-        payload = {
-            "model": CUSTOM_MODEL,
-            "prompt": prompt,
-            "stream": True,
-            "options": {
-                "temperature": temperature,
-                "num_predict": 1024
-            }
-        }
-
-        async with aiohttp.ClientSession() as session:
-            async with session.post(
-                f"{self.base_url}/api/generate",
-                json=payload,
-                timeout=60
-            ) as response:
-                async for line in response.content:
-                    if line:
-                        try:
-                            data = json.loads(line.decode('utf-8'))
-                            yield data.get("response", "")
-                        except:
-                            continue
-
-# Initialize the client
-client = OllamaClient()
-
-# Functions for Gradio
-def generate_code(prompt, temperature, language):
-    """Generate code"""
-    if not prompt.strip():
-        return "❌ Please enter a prompt"
-
-    # Add the language context
-    enhanced_prompt = prompt
-    if language != "auto":
-        enhanced_prompt = f"Write in {language}:\n{prompt}"
-
-    # Check Ollama's health
-    if not client.check_health():
-        return "⚠️ Ollama is not available. Please wait..."
-
-    # Generate the response
-    result = client.generate(enhanced_prompt, temperature)
-    return result
-
-async def generate_stream_ui(prompt, temperature):
-    """Streaming interface"""
-    if not prompt.strip():
-        yield "❌ Please enter a prompt"
-        return
-
-    full_response = ""
-    async for chunk in client.generate_stream(prompt, temperature):
-        full_response += chunk
-        yield full_response
-
-def get_model_info():
-    """Get model information"""
-    try:
-        response = requests.get(f"{OLLAMA_URL}/api/tags")
-        if response.status_code == 200:
-            models = response.json().get("models", [])
-            return "\n".join([f"📦 {m['name']} ({m.get('size', 'N/A')})" for m in models])
-        return "⚠️ Unable to retrieve the information"
-    except:
-        return "❌ Connection error"
-
-# Gradio interface
-with gr.Blocks(
-    theme=gr.themes.Soft(),
-    title="🤖 DeepSeek-Coder 1.3B - Hugging Face",
-    css=".gradio-container {max-width: 900px !important}"
-) as demo:
-
-    gr.Markdown("""
-    # 🚀 DeepSeek-Coder 1.3B
-    ### Local AI code generator on Hugging Face Spaces
-
-    This model runs locally with **Ollama**. It generates code in several programming languages.
-    """)
-
-    with gr.Row():
-        with gr.Column(scale=2):
-            # Input area
-            prompt = gr.Textbox(
-                label="📝 Prompt",
-                placeholder="Write a Python function that computes the factorial...",
-                lines=6,
-                max_lines=10
-            )
-
-            with gr.Row():
-                temperature = gr.Slider(
-                    label="🌡️ Temperature",
-                    minimum=0.1,
-                    maximum=1.0,
-                    value=0.2,
-                    step=0.1
-                )
-
-                language = gr.Dropdown(
-                    label="💻 Language",
-                    choices=[
-                        "auto", "python", "javascript", "java", "cpp",
-                        "go", "rust", "html", "css", "sql", "bash"
-                    ],
-                    value="auto"
-                )
-
-            with gr.Row():
-                generate_btn = gr.Button("✨ Generate", variant="primary")
-                stream_btn = gr.Button("⚡ Generate with streaming")
-                clear_btn = gr.Button("🧹 Clear")
-
-        with gr.Column(scale=3):
-            # Output area
-            output = gr.Code(
-                label="📄 Generated code",
-                language="python",
-                lines=15,
-                interactive=False
-            )
-
-            # Streaming area
-            stream_output = gr.Textbox(
-                label="⚡ Streaming",
-                visible=False,
-                lines=15
-            )
-
-    # Information section
-    with gr.Accordion("ℹ️ System information", open=False):
-        with gr.Row():
-            model_info = gr.Textbox(
-                label="Available models",
-                value="Loading...",
-                interactive=False
-            )
-            health_status = gr.Textbox(
-                label="Ollama status",
-                value="Checking...",
-                interactive=False
-            )
-
-        refresh_btn = gr.Button("🔄 Refresh")
-
-    # Examples section
-    with gr.Accordion("📚 Examples", open=True):
-        examples = gr.Examples(
-            examples=[
-                ["Write a function that reverses a string", "python", 0.2],
-                ["Create a React component for a button", "javascript", 0.2],
-                ["Implement a linked list in C++", "cpp", 0.2],
-                ["Write an SQL query to find active users", "sql", 0.2],
-                ["Bash script to back up a MySQL database", "bash", 0.2]
-            ],
-            inputs=[prompt, language, temperature],
-            outputs=output
-        )
-
-    # Events
-    generate_btn.click(
-        fn=generate_code,
-        inputs=[prompt, temperature, language],
-        outputs=output
-    )
-
-    stream_btn.click(
-        fn=lambda: gr.update(visible=False),
-        outputs=output
-    ).then(
-        fn=lambda: gr.update(visible=True),
-        outputs=stream_output
-    ).then(
-        fn=generate_stream_ui,
-        inputs=[prompt, temperature],
-        outputs=stream_output
-    )
-
-    clear_btn.click(
-        fn=lambda: ("", "", ""),
-        outputs=[prompt, output, stream_output]
-    )
-
-    refresh_btn.click(
-        fn=get_model_info,
-        outputs=model_info
-    ).then(
-        fn=lambda: "✅ Online" if client.check_health() else "❌ Offline",
-        outputs=health_status
-    )
-
-    # Initialization
-    demo.load(
-        fn=get_model_info,
-        outputs=model_info
-    ).then(
-        fn=lambda: "✅ Online" if client.check_health() else "❌ Offline",
-        outputs=health_status
-    )
+            self.send_response(200)
+            self.send_header("Content-type", "application/json")
+            self.end_headers()
+            self.wfile.write(json.dumps({"response": result.stdout}).encode())

+# Start the server
 if __name__ == "__main__":
-
-
-
-
-        share=False,
-        debug=False,
-        show_error=True
-    )
+    PORT = 7860
+    with socketserver.TCPServer(("", PORT), OllamaHandler) as httpd:
+        print(f"Server started on port {PORT}")
+        httpd.serve_forever()
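Note that the new do_POST shells out to `ollama run` for every request, spawning a fresh process each time. The same generation can instead go through Ollama's local HTTP API, which is what the removed Gradio version did via aiohttp. A non-streaming sketch, not part of this commit (it assumes Ollama's default endpoint http://127.0.0.1:11434; `generate` is a hypothetical helper):

import json
import urllib.request

def generate(prompt, model="deepseek-coder:1.3b", base_url="http://127.0.0.1:11434"):
    """One-shot (stream=False) call to Ollama's /api/generate endpoint."""
    req = urllib.request.Request(
        f"{base_url}/api/generate",
        data=json.dumps({"model": model, "prompt": prompt, "stream": False}).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req, timeout=300) as resp:
        return json.loads(resp.read()).get("response", "")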
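The new /generate endpoint can be smoke-tested with a small standard-library client. A sketch assuming the server is reachable on localhost:7860 (the prompt is illustrative, and the server blocks while `ollama run` executes, so allow a generous timeout):

import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:7860/generate",
    data=json.dumps({"prompt": "Write a Python function that reverses a string"}).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req, timeout=300) as resp:
    print(json.loads(resp.read())["response"])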
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
-gradio==
+gradio==3.50.2
 requests==2.31.0
 aiohttp==3.9.1
-
+huggingface-hub==0.20.3