Update app.py
app.py CHANGED
@@ -11,10 +11,15 @@ import gradio as gr
 # ---- Load model and tokenizer ----
 model_name = "Mudasir692/mbart-eng-ur"
 
-# Fix config issue (early_stopping = None)
 config = AutoConfig.from_pretrained(model_name)
+
+# Fix missing or invalid parameters
 if getattr(config, "early_stopping", None) is None:
     config.early_stopping = True
+if getattr(config, "max_length", None) is None:
+    config.max_length = 128  # ✅ set a safe limit
+if getattr(config, "num_beams", None) is None:
+    config.num_beams = 4
 
 tokenizer = MBart50TokenizerFast.from_pretrained(model_name, src_lang="en_XX", tgt_lang="ur_PK")
 model = MBartForConditionalGeneration.from_pretrained(model_name, config=config)
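
Why the extra fallbacks: newer transformers releases validate generation settings, and a checkpoint config where early_stopping, max_length, or num_beams is left as None can make model.generate() fail. A minimal sketch of the same fallback pattern, confirming the patched values after loading (the defaults mirror this commit):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("Mudasir692/mbart-eng-ur")

# Apply the commit's fallbacks, then confirm no generation
# parameter is still None before model.generate() runs.
for name, default in [("early_stopping", True), ("max_length", 128), ("num_beams", 4)]:
    if getattr(config, name, None) is None:
        setattr(config, name, default)
    print(name, "=", getattr(config, name))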
@@ -23,8 +28,14 @@ model = MBartForConditionalGeneration.from_pretrained(model_name, config=config)
 def translate_to_urdu(text):
     if not text.strip():
         return "Please enter some English text."
+
     inputs = tokenizer(text, return_tensors="pt", padding=True)
-    translated_tokens = model.generate(**inputs)
+    translated_tokens = model.generate(
+        **inputs,
+        max_length=128,  # ✅ explicitly set again
+        num_beams=4,
+        early_stopping=True
+    )
     urdu_output = tokenizer.decode(translated_tokens[0], skip_special_tokens=True)
     return urdu_output
 
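
A quick way to exercise the updated function outside the web UI; the gr.Interface wiring below is a hypothetical sketch, since the rest of app.py is not shown in this diff:

import gradio as gr

# Direct call; should print the Urdu translation of the input.
print(translate_to_urdu("How are you today?"))

# Hypothetical wiring (assumption: app.py exposes the function roughly like this).
demo = gr.Interface(fn=translate_to_urdu, inputs="text", outputs="text",
                    title="English to Urdu (mBART)")
demo.launch()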