import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the French -> Wolof translation model from the Hugging Face Hub.
model_name = "MaroneAI/Niani-Translator_French-To-Wolof"

# Fix: ignore tokenizer.json and force the slow SentencePiece tokenizer.
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
model.eval()  # inference only — disable dropout etc.


def translate(text: str) -> str:
    """Translate French *text* to Wolof using the loaded seq2seq model.

    Args:
        text: Source sentence(s) in French.

    Returns:
        The decoded Wolof translation, special tokens stripped.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True)
    # no_grad: generation needs no autograd graph; saves memory and time.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Minimal Gradio UI: one French input box, one Wolof output box.
demo = gr.Interface(
    fn=translate,
    inputs=gr.Textbox(lines=3, placeholder="Entrez un texte en français..."),
    outputs=gr.Textbox(label="Traduction en wolof"),
    title="Niani Translator 🌍",
    description="Modèle de traduction Français → Wolof fine-tuné par MaroneAI.",
)

if __name__ == "__main__":
    demo.launch()