Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -20,7 +20,7 @@ model, tokenizer = load_model()
|
|
| 20 |
|
| 21 |
def translate_text(text, model, tokenizer):
|
| 22 |
inputs = tokenizer.encode(text, return_tensors="pt", truncation=True)
|
| 23 |
-
outputs = model.generate(inputs, max_length=
|
| 24 |
translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 25 |
return translated_text
|
| 26 |
|
|
@@ -30,12 +30,7 @@ st.title("English to Urdu Translation")
|
|
| 30 |
# Input text from the user
|
| 31 |
text_to_translate = st.text_area("Enter English text to translate:")
|
| 32 |
|
| 33 |
-
if
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
st.success("Translation completed!")
|
| 38 |
-
st.markdown(f"### Translated Text:\n{translated_text}")
|
| 39 |
-
else:
|
| 40 |
-
st.error("Please enter some text to translate.")
|
| 41 |
-
|
|
|
|
| 20 |
|
| 21 |
def translate_text(text, model, tokenizer):
    """Translate *text* with a seq2seq Hugging Face model and return the decoded string.

    Args:
        text: Source-language input string (English, per the surrounding app).
        model: A Hugging Face seq2seq model exposing ``generate``.
        tokenizer: The matching tokenizer exposing ``encode``/``decode``.

    Returns:
        The generated translation with special tokens stripped.
    """
    # Tokenize to a PyTorch tensor; truncation=True caps over-long inputs
    # at the tokenizer's model max length.
    token_ids = tokenizer.encode(text, return_tensors="pt", truncation=True)
    # Beam search (5 beams) with early stopping; output capped at 1024 tokens.
    generated = model.generate(token_ids, max_length=1024, num_beams=5, early_stopping=True)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
| 26 |
|
|
|
|
| 30 |
# Input text from the user
text_to_translate = st.text_area("Enter English text to translate:")

# NOTE(review): the nesting below is reconstructed from a whitespace-mangled
# diff; st.markdown is assumed to sit inside the `if`, after the spinner
# context exits — confirm against the original app.py.
if text_to_translate.strip():
    # Show a busy indicator while the (potentially slow) model generates.
    with st.spinner("Translating..."):
        translated_text = translate_text(text_to_translate, model, tokenizer)
    st.markdown(f"### Translated Text:\n{translated_text}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|