Spaces: Sleeping
| # import torch | |
| # from transformers import AutoModelForSeq2SeqLM, AutoTokenizer | |
| # import gradio as gr | |
| # # Load your custom model and tokenizer | |
| # model_name = "MiVaCod/mbart-neutralization" | |
| # tokenizer = AutoTokenizer.from_pretrained(model_name) | |
| # model = AutoModelForSeq2SeqLM.from_pretrained(model_name) | |
| # # Function to correct sentences | |
| # def predict(sentence): | |
| # inputs = tokenizer.encode("correction: " + sentence, return_tensors="pt", max_length=512, truncation=True) | |
| # outputs = model.generate(inputs, max_length=128, num_beams=4, early_stopping=True) | |
| # corrected_sentence = tokenizer.decode(outputs[0], skip_special_tokens=True) | |
| # return corrected_sentence | |
| # # Gradio Interface | |
| # iface = gr.Interface( | |
| # fn=correct_sentence, | |
| # inputs="text", | |
| # outputs="text", | |
| # title="Sentence Correction", | |
| # description="Enter a sentence to be corrected:", | |
| # theme="compact" | |
| # ) | |
| # # Launch the interface | |
| # gr.Interface(fn=predict, inputs=gr.inputs.Textbox, outputs=gr.outputs.Textbox).launch(share=False) | |
from transformers import MBartForConditionalGeneration, MBart50Tokenizer
import gradio as grad

# Hub checkpoint fine-tuned to rewrite Spanish sentences in gender-neutral form.
model_name = "MiVaCod/mbart-neutralization"

# Load the tokenizer/model pair once at module import so every request reuses them.
text2text_tkn = MBart50Tokenizer.from_pretrained(model_name)
mdl = MBartForConditionalGeneration.from_pretrained(model_name)
def text2text_paraphrase(sentence1):
    """Rewrite *sentence1* using the neutralization model and return the result.

    Parameters
    ----------
    sentence1 : str
        The input sentence typed by the user.

    Returns
    -------
    str
        The model's generated rewrite, with special tokens removed.
    """
    # NOTE(review): the "rte sentence1: " prefix looks inherited from an RTE
    # paraphrase tutorial — confirm the fine-tuned checkpoint actually expects it.
    inp1 = "rte sentence1: " + sentence1
    enc = text2text_tkn(inp1, return_tensors="pt")
    tokens = mdl.generate(**enc)
    # skip_special_tokens drops </s> / language-code markers that would otherwise
    # be shown to the user; [0] unwraps the single-sentence batch so the Gradio
    # Textbox receives a plain string instead of a Python list repr.
    response = text2text_tkn.batch_decode(tokens, skip_special_tokens=True)
    return response[0]
# Web UI: a single-line input for the sentence to neutralize and a
# single-line output showing the model's rewrite.
misogynous_box = grad.Textbox(
    lines=1,
    label="Frase misógina",
    placeholder="Introduce una frase misógina",
)
corrected_box = grad.Textbox(lines=1, label="Frase corregida")

grad.Interface(
    fn=text2text_paraphrase,
    inputs=[misogynous_box],
    outputs=corrected_box,
).launch()