Update app.py
app.py CHANGED
```diff
@@ -1,51 +1,49 @@
 import gradio as gr
-
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import os
 
-# 🔹
+# 🔹 Hugging Face token
 hf_token = os.environ.get("HF_TOKEN")
 if hf_token is None:
-    raise ValueError("⚠️
-                     "Add it in the Space secrets.")
+    raise ValueError("⚠️ HF_TOKEN missing from the Space secrets.")
 
 # 🔹 Model ID
 model_id = "SafaaAI/final_llm_darija_fr_tech"
 
-# 🔹
-
-
-
+# 🔹 Load tokenizer + model (forced to text-only)
+tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(
+    model_id,
     token=hf_token,
-
-
+    trust_remote_code=True,
+    device_map="auto"
 )
 
-# 🔹 Function for
+# 🔹 Text generation function
 def chat_with_model(message, history):
     history = history or []
-
-
-
-
-
-
-
-
-
-
-
-
-
+    inputs = tokenizer(message, return_tensors="pt").to(model.device)
+
+    with torch.no_grad():
+        outputs = model.generate(
+            **inputs,
+            max_new_tokens=200,
+            do_sample=True,
+            temperature=0.7,
+            top_p=0.9
+        )
+
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     history.append((message, response))
     return history, history
 
-# 🔹
+# 🔹 Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## 💬
-
+    gr.Markdown("## 💬 SafaaAI - Multimodal LLM (text-only mode)")
     chatbot = gr.Chatbot(type="messages")
     msg = gr.Textbox(label="Type your message here")
-    clear = gr.Button("🧹 Clear
+    clear = gr.Button("🧹 Clear")
 
     state = gr.State([])
 
```
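Note on the new loading code: `device_map="auto"` in `from_pretrained` is dispatched through the `accelerate` library, so the Space will fail at startup if that package is missing. The Space's `requirements.txt` is not part of this diff; purely as an assumption, it would need at least:

```
gradio
torch
transformers
accelerate  # required by device_map="auto" in from_pretrained
```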
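Two behaviors in the new `chat_with_model` are worth flagging. First, `model.generate` returns prompt plus continuation for causal LMs, so decoding `outputs[0]` echoes the user's message back at the start of the reply. Second, the function appends `(message, response)` tuples to the history while the chatbot below is declared with `gr.Chatbot(type="messages")`, which expects `role`/`content` dicts; recent Gradio versions reject the tuple format. A minimal revised sketch (an assumption about the intended behavior, not part of this commit):

```python
def chat_with_model(message, history):
    history = history or []
    inputs = tokenizer(message, return_tensors="pt").to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )

    # Slice off the prompt tokens so only the model's continuation is decoded.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)

    # type="messages" expects role/content dicts rather than (user, bot) tuples.
    history.append({"role": "user", "content": message})
    history.append({"role": "assistant", "content": response})
    return history, history
```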
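The hunk ends at line 49, three context lines past the last change, so whatever event wiring and launch call follow in `app.py` are not visible here. Purely as a hypothetical sketch of how these components are conventionally hooked up, with no claim about the actual file contents:

```python
with gr.Blocks() as demo:
    gr.Markdown("## 💬 SafaaAI - Multimodal LLM (text-only mode)")
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Type your message here")
    clear = gr.Button("🧹 Clear")

    state = gr.State([])

    # Pressing Enter in the textbox sends the message and refreshes both outputs.
    msg.submit(chat_with_model, inputs=[msg, state], outputs=[chatbot, state])
    # Clear both the visible transcript and the stored history.
    clear.click(lambda: ([], []), inputs=None, outputs=[chatbot, state])

demo.launch()
```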