# Hugging Face Space (status at scrape time: Sleeping)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Fine-tuned extractive question-answering checkpoint hosted on the Hub.
MODEL_NAME = "Aurelie123/my_qa_model"
model_name = MODEL_NAME  # keep the original name for any external references

# Load once at import time so every request reuses the same weights.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
def answer_question(context, question):
    """Extract the answer to `question` from `context` with the QA model.

    Args:
        context: Passage of text the answer should be drawn from.
        question: Natural-language question about the passage.

    Returns:
        The predicted answer span as a string; an empty string when either
        input is empty or the model predicts an invalid (end <= start) span.
    """
    # Guard against empty inputs from the UI — the model has nothing to do.
    if not context or not question:
        return ""

    # truncation=True prevents a crash when question+context exceed the
    # model's maximum sequence length (the context tail is dropped).
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True)

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # Most likely start/end token positions; end is exclusive for slicing.
    start = torch.argmax(outputs.start_logits)
    end = torch.argmax(outputs.end_logits) + 1

    # The two argmaxes are independent, so the model can predict end <= start;
    # treat that as "no answer" instead of returning a garbage slice.
    if end <= start:
        return ""

    # decode() handles subword merging and drops [CLS]/[SEP] if the span
    # happens to include them (convert_tokens_to_string would keep them).
    answer = tokenizer.decode(
        inputs["input_ids"][0][start:end], skip_special_tokens=True
    )
    return answer.strip()
# Two free-text inputs (passage first, then the question), one text output.
context_box = gr.Textbox(label="Context")
question_box = gr.Textbox(label="Question")

demo = gr.Interface(
    fn=answer_question,
    inputs=[context_box, question_box],
    outputs="text",
    title="My QA Model",
)

demo.launch()