MaroneAI committed on
Commit
2990cb0
·
verified ·
1 Parent(s): a6b34c6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -31
app.py CHANGED
@@ -1,41 +1,25 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
- import torch
4
 
5
- # Charger le modèle et le tokenizer
6
  model_name = "MaroneAI/Niani-nllb-Wolof-To-Frensh-615M"
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 
8
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
- model = model.to(device)
11
 
12
- # Fonction de traduction
13
- def translate_wolof_to_french(text):
14
- if not text.strip():
15
- return "Écris une phrase en Wolof."
16
- inputs = tokenizer(text, return_tensors="pt").to(device)
17
- with torch.no_grad():
18
- outputs = model.generate(
19
- **inputs,
20
- max_length=128,
21
- num_beams=4,
22
- early_stopping=True
23
- )
24
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
25
 
26
- # Interface Gradio
27
- iface = gr.Interface(
28
- fn=translate_wolof_to_french,
29
- inputs=gr.Textbox(lines=3, placeholder="Écris ici ta phrase en Wolof...", label="Texte en Wolof"),
30
- outputs=gr.Textbox(label="Traduction en Français"),
31
- title="Traduction WolofFrançais",
32
- description="Modèle fine-tuné basé sur facebook/nllb-200-distilled-600M.",
33
- examples=[
34
- ["Naka nga def?"],
35
- ["Jërëjëf ci sa jàmm."],
36
- ["Ba beneen yoon."],
37
- ["Kii mooy sama xarit."]
38
- ]
39
  )
40
 
41
- iface.launch()
 
 
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
3
 
4
+ # Charger le modèle depuis Hugging Face
5
  model_name = "MaroneAI/Niani-nllb-Wolof-To-Frensh-615M"
6
+
7
+ # ✅ Correction : ignorer le tokenizer.json et forcr SentencePiece
8
+ tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
9
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
 
10
 
11
+ def translate(text):
12
+ inputs = tokenizer(text, return_tensors="pt", padding=True)
13
+ outputs = model.generate(**inputs, max_length=256)
 
 
 
 
 
 
 
 
 
14
  return tokenizer.decode(outputs[0], skip_special_tokens=True)
15
 
16
+ demo = gr.Interface(
17
+ fn=translate,
18
+ inputs=gr.Textbox(lines=3, placeholder="Entrez un texte en Wolof..."),
19
+ outputs=gr.Textbox(label="Traduction en wolof"),
20
+ title="Niani Translator-2 🍉",
21
+ description="Modèle de traduction Français Wolof fine-tuné par MaroneAI."
 
 
 
 
 
 
 
22
  )
23
 
24
+ if __name__ == "__main__":
25
+ demo.launch()