Hugging Face Spaces — commit diff view (Space status: Sleeping)
Commit: "maj model" (French "maj" = mise à jour — i.e. model update)
Browse files
File changed: app.py
CHANGED
|
@@ -5,7 +5,7 @@ import gradio as gr
|
|
| 5 |
# --------------------------------------------------
|
| 6 |
# Chargement du pipeline NLLB
|
| 7 |
# --------------------------------------------------
|
| 8 |
-
MODEL_NAME = "facebook/nllb-200-  [line truncated during page extraction — the original (removed) model name is cut off mid-string; the new value added by this commit, shown below, is "facebook/nllb-200-distilled-1.3B"]
|
| 9 |
|
| 10 |
device = 0 if torch.cuda.is_available() else -1
|
| 11 |
print(f"🚀 Chargement du modèle {MODEL_NAME} sur {'GPU' if device == 0 else 'CPU'}...")
|
|
@@ -13,7 +13,9 @@ print(f"🚀 Chargement du modèle {MODEL_NAME} sur {'GPU' if device == 0 else '
|
|
| 13 |
translator = pipeline(
|
| 14 |
"translation",
|
| 15 |
model=MODEL_NAME,
|
| 16 |
-
device=device
|
|
|
|
|
|
|
| 17 |
)
|
| 18 |
|
| 19 |
# --------------------------------------------------
|
|
@@ -87,4 +89,4 @@ with gr.Blocks(title="🌍 Traduction EWE") as demo:
|
|
| 87 |
"""
|
| 88 |
)
|
| 89 |
|
| 90 |
-
demo.launch()
|
|
|
|
| 5 |
# --------------------------------------------------
|
| 6 |
# Chargement du pipeline NLLB
|
| 7 |
# --------------------------------------------------
|
| 8 |
+
MODEL_NAME = "facebook/nllb-200-distilled-1.3B"
|
| 9 |
|
| 10 |
device = 0 if torch.cuda.is_available() else -1
|
| 11 |
print(f"🚀 Chargement du modèle {MODEL_NAME} sur {'GPU' if device == 0 else 'CPU'}...")
|
|
|
|
| 13 |
translator = pipeline(
|
| 14 |
"translation",
|
| 15 |
model=MODEL_NAME,
|
| 16 |
+
device=device,
|
| 17 |
+
src_lang="fra_Latn",
|
| 18 |
+
tgt_lang="ewe_Latn"
|
| 19 |
)
|
| 20 |
|
| 21 |
# --------------------------------------------------
|
|
|
|
| 89 |
"""
|
| 90 |
)
|
| 91 |
|
| 92 |
+
demo.launch()
|