Spaces:
Sleeping
Sleeping
Sanzana Lora
committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -15,12 +15,18 @@ paraphrase_model = AutoModelForSeq2SeqLM.from_pretrained("csebuetnlp/banglat5_ba
|
|
| 15 |
paraphrase_tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglat5_banglaparaphrase")
|
| 16 |
|
| 17 |
# Function to perform machine translation
|
| 18 |
-
def
|
| 19 |
inputs = translation_tokenizer_en_bn("translate: " + input_text, return_tensors="pt")
|
| 20 |
outputs = translation_model_en_bn.generate(**inputs)
|
| 21 |
translated_text = translation_tokenizer_en_bn.decode(outputs[0], skip_special_tokens=True)
|
| 22 |
return translated_text
|
| 23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 24 |
# Function to perform summarization
|
| 25 |
def summarize_text(input_text):
|
| 26 |
inputs = summarization_tokenizer("summarize: " + input_text, return_tensors="pt")
|
|
@@ -35,31 +41,24 @@ def paraphrase_text(input_text):
|
|
| 35 |
paraphrased_text = paraphrase_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 36 |
return paraphrased_text
|
| 37 |
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
)
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
def update_interface(change):
|
| 48 |
-
selected_task = task_selector.value
|
| 49 |
-
|
| 50 |
-
if selected_task == 'Translate':
|
| 51 |
-
iface.fn = translate_text
|
| 52 |
-
elif selected_task == 'Summarize':
|
| 53 |
-
iface.fn = summarize_text
|
| 54 |
-
elif selected_task == 'Paraphrase':
|
| 55 |
-
iface.fn = paraphrase_text
|
| 56 |
|
| 57 |
-
#
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
| 61 |
)
|
| 62 |
|
| 63 |
-
|
| 64 |
# Launch the Gradio app
|
| 65 |
iface.launch(inline=False)
|
|
|
|
| 15 |
paraphrase_tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/banglat5_banglaparaphrase")
|
| 16 |
|
| 17 |
# Function to perform machine translation
|
| 18 |
+
def translate_text_en_bn(input_text):
    """Translate English text to Bengali using the en->bn BanglaT5 NMT model."""
    # NOTE(review): the "translate: " prefix mirrors the T5 convention —
    # confirm this checkpoint actually expects it.
    encoded = translation_tokenizer_en_bn("translate: " + input_text, return_tensors="pt")
    # Run generation, then decode the first (and only) sequence to a plain string.
    generated = translation_model_en_bn.generate(**encoded)
    return translation_tokenizer_en_bn.decode(generated[0], skip_special_tokens=True)
|
| 23 |
|
| 24 |
+
def translate_text_bn_en(input_text):
    """Translate Bengali text to English using the bn->en BanglaT5 NMT model."""
    # NOTE(review): the "translate: " prefix mirrors the T5 convention —
    # confirm this checkpoint actually expects it.
    encoded = translation_tokenizer_bn_en("translate: " + input_text, return_tensors="pt")
    # Run generation, then decode the first (and only) sequence to a plain string.
    generated = translation_model_bn_en.generate(**encoded)
    return translation_tokenizer_bn_en.decode(generated[0], skip_special_tokens=True)
|
| 29 |
+
|
| 30 |
# Function to perform summarization
|
| 31 |
def summarize_text(input_text):
|
| 32 |
inputs = summarization_tokenizer("summarize: " + input_text, return_tensors="pt")
|
|
|
|
| 41 |
paraphrased_text = paraphrase_tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 42 |
return paraphrased_text
|
| 43 |
|
| 44 |
+
def process_text(text, task):
    """Dispatch *text* to the handler selected by *task*.

    Parameters
    ----------
    text : str
        The input passage to transform.
    task : str
        One of "Translate_English_to_Bengali", "Translate_Bengali_to_English",
        "Summarize", or "Paraphrase".

    Returns
    -------
    str
        The transformed text, or an explanatory message when *task* is not
        recognized.
    """
    # A dispatch table is clearer and easier to extend than an if/elif chain.
    handlers = {
        "Translate_English_to_Bengali": translate_text_en_bn,
        "Translate_Bengali_to_English": translate_text_bn_en,
        "Summarize": summarize_text,
        "Paraphrase": paraphrase_text,
    }
    handler = handlers.get(task)
    if handler is None:
        # Fix: the original fell through and returned None for an unknown
        # task, which Gradio renders as empty output with no explanation.
        return f"Unknown task: {task!r}"
    return handler(text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
+
# Define the Gradio interface: a text box plus a task selector, both feeding
# process_text, with the result shown as plain text.
_task_dropdown = gr.Dropdown(
    [
        "Translate_English_to_Bengali",
        "Translate_Bengali_to_English",
        "Summarize",
        "Paraphrase",
    ]
)
iface = gr.Interface(
    fn=process_text,
    inputs=["text", _task_dropdown],
    outputs="text",
    live=False,
    title="Usage of BanglaT5 Model",
)
|
| 62 |
|
|
|
|
| 63 |
# Launch the Gradio app.
# NOTE(review): inline=False appears intended to avoid embedding the UI in a
# notebook cell — confirm against the Gradio `launch` docs for this version.
iface.launch(inline=False)
|