from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
import gradio as gr
|
|
# Hugging Face Hub model repository
model_name = "hosseinhimself/tara-roberta-base-fa-qa"
|
|
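# Load the tokenizer and the fine-tuned question-answering model from the Hub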
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
|
|
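# Build the question-answering pipeline from the loaded model and tokenizer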
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)
|
|
|
|
# Return the answer span extracted from the context for the given question
def answer_question(context, question):
    response = qa_pipeline(question=question, context=context)
    return response['answer']
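# Example call (placeholder strings, not from the model card; the model is
# fine-tuned for Persian (fa) text):
#   answer_question("<Persian context passage>", "<Persian question>")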
|
|
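# Gradio UI: a multi-line textbox for the context and a single-line textbox for the question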
interface = gr.Interface(
    fn=answer_question,
    inputs=[
        gr.Textbox(lines=10, placeholder="Enter context here..."),
        gr.Textbox(lines=1, placeholder="Enter question here...")
    ],
    outputs=gr.Markdown(label="Result"),
    title="Tara Question Answering Model",
    description="This model answers questions based on the provided context."
)
|
|
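# Launch the Gradio web app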
interface.launch()
|
|