# Hugging Face Space: SafaaAI Darija/French/technical chatbot (status header removed from scrape)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import os

# Read the Hugging Face access token from the Space's secrets; fail fast
# with a clear message when it is missing.
hf_token = os.environ.get("HF_TOKEN")
if hf_token is None:
    raise ValueError(
        "⚠️ Le token Hugging Face (HF_TOKEN) n'est pas trouvé. "
        "Ajoute-le dans les secrets du Space."
    )

# Text model fine-tuned for Darija + French + technical conversations.
model_id = "SafaaAI/final_llm_darija_fr_tech"

tokenizer = AutoTokenizer.from_pretrained(
    model_id,
    token=hf_token,
    trust_remote_code=True,
)
# device_map="auto" lets accelerate place the weights on GPU when available.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=hf_token,
    trust_remote_code=True,
    device_map="auto",
)
| # 🔹 Fonction d'inférence | |
def chat_with_model(message, history, image):
    """Generate one assistant reply and append it to the chat history.

    Parameters
    ----------
    message : str
        The user's current message.
    history : list[tuple[str, str]] | None
        Previous (user, assistant) turns; ``None`` on the first call.
    image : str | None
        Optional image file path from the Gradio widget. The underlying
        model is text-only, so the image is only acknowledged with a
        textual note prepended to the message.

    Returns
    -------
    tuple[list, list]
        The updated history twice: once for the Chatbot widget, once for
        the Gradio State.
    """
    history = history or []

    # Rebuild the whole conversation as one flat prompt string using the
    # USER:/ASSISTANT: convention the model was trained with.
    full_prompt = "A chat between a curious user and an AI assistant."
    for user_message, bot_message in history:
        full_prompt += f" USER: {user_message} ASSISTANT: {bot_message}"

    # The model cannot see images; add a textual note instead.
    if image is not None:
        message = f"[Image fournie par l'utilisateur] {message}"

    # Append the current turn, leaving the prompt open after "ASSISTANT:".
    full_prompt += f" USER: {message} ASSISTANT:"

    inputs = tokenizer(full_prompt, return_tensors="pt").to(model.device)
    input_ids = inputs["input_ids"]
    attention_mask = inputs["attention_mask"]

    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            attention_mask=attention_mask,
            max_new_tokens=200,
            do_sample=True,
            top_p=0.9,
            temperature=0.7,
        )

    # Decode only the newly generated tokens. The previous approach decoded
    # the full sequence and searched for the last "ASSISTANT:" marker, which
    # breaks if the generated reply itself contains that marker or if
    # detokenization shifts the prompt text. Slicing at the prompt length
    # is the robust, standard way to isolate the completion.
    prompt_length = input_ids.shape[1]
    response = tokenizer.decode(
        output_ids[0][prompt_length:], skip_special_tokens=True
    ).strip()

    # Record the turn and return the history for both output components.
    history.append((message, response))
    return history, history
# Gradio UI: a chat window next to a message box with an optional image input.
with gr.Blocks() as demo:
    gr.Markdown("## 💬 Chatbot SafaaAI - LLM (Darija + Français + Technique)")

    with gr.Row():
        chatbot = gr.Chatbot(scale=3)
        with gr.Column(scale=1):
            msg = gr.Textbox(label="Écris ton message ici")
            image = gr.Image(label="Ajoute une image (optionnel)", type="filepath")
            clear = gr.Button("🧹 Effacer la conversation")

    # Hidden conversation history shared across callbacks.
    state = gr.State([])

    # Submitting the textbox feeds (message, history, image) to the model
    # and refreshes both the visible chat and the stored history.
    msg.submit(chat_with_model, [msg, state, image], [chatbot, state])
    # The clear button resets chat display and history to empty lists.
    clear.click(lambda: ([], []), None, [chatbot, state])

# Launch the app when run as a script.
if __name__ == "__main__":
    demo.launch()