Update app.py
app.py CHANGED
@@ -1,83 +1,162 @@
 import os
 import gradio as gr
 from huggingface_hub import InferenceClient
+from fastapi import FastAPI, Request, Response
+from fastapi.responses import StreamingResponse
+import json
+from typing import AsyncGenerator
+import asyncio
 
 # Get the token from the environment variables (Secrets)
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
-
-
-
-
-
-
-
-
-
-
-
+# Define a system message
+SYSTEM_MESSAGE = """
+Tu es TeachEase, un enseignant virtuel conçu pour aider les élèves à comprendre leurs cours, faire leurs exercices et devoirs.
+Ton rôle est d'expliquer les concepts de manière claire et pédagogique, de fournir des exemples concrets et de poser des questions pour vérifier la compréhension.
+**Instructions spéciales :**
+1. **Salutations :** Réponds de manière courte et amicale.
+2. **Expressions mathématiques et scientifiques :** Utilise le format LaTeX.
+3. **Traduction et réponses multilingues :** Adapte ta réponse en fonction de la langue demandée.
+"""
+
+# Create the FastAPI application
+app = FastAPI(title="TeachEase API")
+
+def detect_language_request(message: str) -> str | None:
+    """Detect which language the user asked for"""
+    language_requests = {
+        "anglais": "en", "english": "en", "espagnol": "es", "spanish": "es",
+        "allemand": "de", "german": "de", "portugais": "pt", "portuguese": "pt",
+        "français": "fr", "french": "fr", "italien": "it", "italian": "it",
+        "chinois": "zh", "chinese": "zh", "japonais": "ja", "japanese": "ja",
+        "russe": "ru",
+    }
+    for keyword, lang_code in language_requests.items():
+        if keyword in message.lower():
+            return lang_code
+    return None
+
+async def generate_stream_response(message: str, history: list | None = None, max_tokens: int = 256, temperature: float = 0.7, top_p: float = 0.95) -> AsyncGenerator[str, None]:
+    """Generate a streaming response as server-sent events"""
     if not HF_TOKEN:
-        yield "
+        yield f"data: {json.dumps({'error': 'HF_TOKEN non configuré'})}\n\n"
         return
 
     try:
         client = InferenceClient(token=HF_TOKEN, model="openai/gpt-oss-20b")
-
-        messages = [{"role": "system", "content":
-
+
+        messages = [{"role": "system", "content": SYSTEM_MESSAGE}]
+
+        # Add the conversation history if available
+        if history:
+            for exchange in history[-3:]:  # keep only the last 3 exchanges
+                if exchange.get("user"):
+                    messages.append({"role": "user", "content": exchange["user"]})
+                if exchange.get("assistant"):
+                    messages.append({"role": "assistant", "content": exchange["assistant"]})
+
         messages.append({"role": "user", "content": message})
 
-
-
-        for
+        full_response = ""
+
+        for chunk in client.chat_completion(
             messages,
             max_tokens=max_tokens,
             stream=True,
             temperature=temperature,
             top_p=top_p,
         ):
-            choices
-
-
-            token
+            if chunk.choices and chunk.choices[0].delta.content:
+                token = chunk.choices[0].delta.content
+                full_response += token
+                # Send each token as soon as it arrives
+                yield f"data: {json.dumps({'token': token, 'full_response': full_response})}\n\n"
+                await asyncio.sleep(0.01)  # small delay to pace the stream
+
+        # End-of-stream signal
+        yield f"data: {json.dumps({'done': True, 'full_response': full_response})}\n\n"
+
+    except Exception as e:
+        yield f"data: {json.dumps({'error': str(e)})}\n\n"
 
-
-
-
+@app.post("/api/chat/stream")
+async def chat_stream(request: Request):
+    """Streaming API endpoint"""
+    try:
+        data = await request.json()
+        message = data.get("message", "")
+        history = data.get("history", [])
+        max_tokens = data.get("max_tokens", 256)
+        temperature = data.get("temperature", 0.7)
+        top_p = data.get("top_p", 0.95)
+
+        return StreamingResponse(
+            generate_stream_response(message, history, max_tokens, temperature, top_p),
+            media_type="text/event-stream",
+            headers={
+                "Access-Control-Allow-Origin": "*",
+                "Access-Control-Allow-Methods": "POST, OPTIONS",
+                "Access-Control-Allow-Headers": "Content-Type",
+            }
+        )
     except Exception as e:
-
+        return {"error": str(e)}
 
+@app.options("/api/chat/stream")
+async def options_chat_stream():
+    """Answer CORS preflight requests"""
+    return Response(
+        status_code=204,
+        headers={
+            "Access-Control-Allow-Origin": "*",
+            "Access-Control-Allow-Methods": "POST, OPTIONS",
+            "Access-Control-Allow-Headers": "Content-Type",
+        },
+    )
 
-
-
-    """
-
+# Gradio interface (optional)
+def respond(message, history, system_message=SYSTEM_MESSAGE, max_tokens=256, temperature=0.7, top_p=0.95):
+    """Handler for the Gradio interface"""
+    if not HF_TOKEN:
+        yield "❌ Erreur : HF_TOKEN non configuré"
+        return
+
+    try:
+        client = InferenceClient(token=HF_TOKEN, model="openai/gpt-oss-20b")
+        messages = [{"role": "system", "content": system_message}]
+
+        for val in history[-3:]:
+            if val[0]: messages.append({"role": "user", "content": val[0]})
+            if val[1]: messages.append({"role": "assistant", "content": val[1]})
+
+        messages.append({"role": "user", "content": message})
+        response = ""
+
+        for chunk in client.chat_completion(
+            messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
+        ):
+            if chunk.choices and chunk.choices[0].delta.content:
+                token = chunk.choices[0].delta.content
+                response += token
+                yield response
+
+    except Exception as e:
+        yield f"❌ Erreur : {str(e)}"
+
+# Gradio interface configuration
+demo = gr.ChatInterface(
     respond,
-    type="messages",
     additional_inputs=[
-        gr.Textbox(value=
-        gr.Slider(minimum=1, maximum=
+        gr.Textbox(value=SYSTEM_MESSAGE, label="System message"),
+        gr.Slider(minimum=1, maximum=512, value=256, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
     ],
+    title="TeachEase - Enseignant Virtuel",
+    description="Bienvenue sur TeachEase ! API streaming disponible sur /api/chat/stream"
 )
 
-
-
-    gr.Markdown(f"Token configuré: {'✅ Oui' if HF_TOKEN else '❌ Non'}")
-
-    # You can remove the LoginButton since we use the token directly
-    # with gr.Sidebar():
-    #     gr.LoginButton()
-
-    chatbot.render()
-
+# Mount the Gradio UI on the FastAPI app
+app = gr.mount_gradio_app(app, demo, path="/")
 
 if __name__ == "__main__":
-
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=7860)
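
For reference, a client consumes the new /api/chat/stream endpoint by POSTing a JSON body ("message", optional "history", "max_tokens", "temperature", "top_p") and reading the server-sent events line by line. Below is a minimal Python sketch, assuming the Space is reachable at http://localhost:7860 (swap in the deployed Space URL as needed); the stream_chat helper name and the base_url parameter are illustrative, not part of this commit.

# Minimal SSE client for the /api/chat/stream endpoint (illustrative sketch).
# Assumes the server from this commit is running at http://localhost:7860.
import json
import requests

def stream_chat(message: str, base_url: str = "http://localhost:7860") -> str:
    """POST a message and print tokens as they stream in; return the full reply."""
    resp = requests.post(
        f"{base_url}/api/chat/stream",
        json={"message": message, "history": [], "max_tokens": 256},
        stream=True,
        timeout=120,
    )
    resp.raise_for_status()
    full_response = ""
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue  # skip the blank separators between events
        event = json.loads(line[len("data: "):])
        if "error" in event:
            raise RuntimeError(event["error"])
        if event.get("done"):
            break  # end-of-stream signal sent by the server
        print(event["token"], end="", flush=True)
        full_response = event["full_response"]
    return full_response

if __name__ == "__main__":
    stream_chat("Explique le théorème de Pythagore.")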