import gradio as gr
import torch
# Import the tokenizer and QA model classes from transformers
from transformers import AutoTokenizer, AutoModelForQuestionAnswering

# Load the SQuAD-fine-tuned BERT model and its tokenizer
model_name = "google-bert/bert-large-uncased-whole-word-masking-finetuned-squad"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
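# (Optional) On a machine with a GPU, both the model and the tokenized inputs
# could be moved to "cuda" via .to(...); this version stays on CPU for simplicity.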

def answer_question(context, question):
    """
    Takes a context and a question, runs extractive question answering
    with the loaded model, and returns the predicted answer span.
    """
    # Encode as (question, context) — the pair order SQuAD-fine-tuned BERT expects
    inputs = tokenizer(question, context, return_tensors="pt", truncation=True)
    # Run the model without tracking gradients (inference only)
    with torch.no_grad():
        outputs = model(**inputs)
    # Get the predicted start and end token scores
    start_scores, end_scores = outputs.start_logits, outputs.end_logits
    # Pick the most likely start and end positions
    # (the end index is inclusive, so add 1 for slicing)
    answer_start = torch.argmax(start_scores)
    answer_end = torch.argmax(end_scores) + 1
    # Decode the answer tokens back to a string, dropping special tokens
    # such as [CLS] and [SEP]
    answer = tokenizer.decode(
        inputs["input_ids"][0][answer_start:answer_end], skip_special_tokens=True
    )
    return answer
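
# Example usage (illustrative; since the model is uncased, the predicted
# answer comes back lowercased):
#   answer_question("The Eiffel Tower is located in Paris.", "Where is the Eiffel Tower?")
#   -> "paris"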

# Define the Gradio interface (label= names the fields; a bare positional
# string would set the textbox's default value instead)
interface = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Question")],
    outputs="text",
    title="Question Answering with BERT",
    description="Ask a question about the provided context and get an answer powered by a Google BERT model.",
)

# Launch the Gradio app
interface.launch()
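
# Note: launch() serves the app locally by default; on Hugging Face Spaces the
# app is hosted automatically. Passing share=True (a standard Gradio option)
# would create a temporary public link when running elsewhere.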