File size: 490 Bytes
166844f
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
# Extractive question-answering demo: a RoBERTa model fine-tuned on SQuAD2,
# exposed through a Gradio web UI.
from transformers import AutoModelForQuestionAnswering, AutoTokenizer,pipeline
import gradio as grad
import ast
# deepset/roberta-base-squad2: RoBERTa fine-tuned for extractive QA (SQuAD 2.0).
model_name='deepset/roberta-base-squad2'
# Build the QA pipeline once at import time; the same name is used for both
# the model weights and the tokenizer. NOTE(review): downloads the model on
# first run — requires network access.
my_pipeline=pipeline('question-answering',model=model_name, tokenizer=model_name)

def answer_question(question,context):
  """Run the extractive-QA pipeline on a (question, context) pair.

  Args:
    question: The question to answer, as plain text.
    context: The passage the answer should be extracted from.

  Returns:
    The pipeline's response (a dict with keys such as 'answer' and
    'score'), which Gradio renders as text.
  """
  # BUG FIX: the original built a dict *string* and parsed it with
  # ast.literal_eval. The embedded "'+''question'" concatenated to the
  # key '+question' (not 'question'), and any quote character in the
  # user's text broke the parse. Building the dict directly is correct
  # for all inputs and needs no parsing step.
  di={'question':question,'context':context}
  response=my_pipeline(di)
  return response
# Wire the QA function into a simple web UI — two text inputs
# (question, context), one text output — and start the local server.
grad.Interface(answer_question,inputs=['text','text'],outputs='text').launch()