File size: 2,920 Bytes
b35c3d3
fbbb769
59f0b59
b35c3d3
 
fbbb769
0d6aa57
 
 
fbbb769
1557bf2
6af4314
 
0d6aa57
7b152ed
59f0b59
 
 
 
 
 
 
 
0d6aa57
7b152ed
59f0b59
 
 
 
 
 
 
 
7b152ed
 
0d6aa57
 
 
fbbb769
 
 
 
0d6aa57
441ff1d
0d6aa57
fbbb769
 
 
441ff1d
fbbb769
a1ff158
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import gradio as gr
from transformers import pipeline
import asyncio
# Default demo inputs pre-filled in the gradio UI: a Wikipedia-style
# excerpt about the Amazon rainforest, and a question answerable from it.
context = "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species."
question = "Which continent is the Amazon rainforest in?"

# Internal checkpoint ids passed as `model_checkpoint` to
# model_performance_UQS; "model1" selects the ALBERT checkpoint and
# "model2" the BERT checkpoint (both fine-tuned on SQuAD).
m1 = "model1"
m2 = "model2"

def model_performance_UQS(model_checkpoint, context, question):
  """Run extractive question answering with the selected fine-tuned model.

  Parameters
  ----------
  model_checkpoint : str
      Either "model1" (ALBERT fine-tuned on SQuAD) or "model2"
      (BERT fine-tuned on SQuAD); selects the Hugging Face Hub repo.
  context : str
      Paragraph of text to extract the answer from.
  question : str
      Question to answer using ``context``.

  Returns
  -------
  tuple
      ``(answer, score)`` — the answer text and its confidence score,
      matching the two output Textboxes declared in the gradio Interface.

  Raises
  ------
  ValueError
      If ``model_checkpoint`` is not a recognized checkpoint id.
  """
  # Map UI-level checkpoint ids to Hub repo ids.  `pipeline` accepts a
  # model identifier string directly; the original wrapped it in
  # gr.Interface.load(...), which hands transformers a gradio Interface
  # object where a model name/path is expected.
  checkpoints = {
      "model1": "amitjohn007/albert-finetuned-squad",
      "model2": "amitjohn007/bert-finetuned-squad",
  }
  if model_checkpoint not in checkpoints:
    # The original's `else` branch only printed, then crashed with a
    # NameError on the undefined `result`; fail loudly and clearly instead.
    raise ValueError(f"Unknown model checkpoint: {model_checkpoint!r}")

  question_answerer = pipeline("question-answering", model=checkpoints[model_checkpoint])
  result = question_answerer(question=question, context=context)
  print("Answer:", result['answer'])
  print("Score:", result['score'])
  # Return a 2-tuple so gradio can fill the two output Textboxes; the
  # original returned the raw result dict, which gradio cannot unpack
  # into two components.
  return result['answer'], result['score']


def predict(model_choice, context, question):
  """Dispatch the gradio request to the model selected in the dropdown.

  Parameters
  ----------
  model_choice : str
      Dropdown value, "m1" or "m2".
  context : str
      Context paragraph from the UI.
  question : str
      Question from the UI.

  Returns
  -------
  The result of ``model_performance_UQS`` for the mapped checkpoint.

  Raises
  ------
  ValueError
      If ``model_choice`` is not "m1" or "m2".
  """
  # Map dropdown values to the module-level checkpoint ids.
  choices = {"m1": m1, "m2": m2}
  if model_choice not in choices:
    # The original silently returned None here, leaving the UI blank with
    # no explanation; surface the problem explicitly instead.
    raise ValueError(f"Unknown model choice: {model_choice!r}")
  return model_performance_UQS(model_checkpoint=choices[model_choice],
                               context=context, question=question)

# Build the web UI — a model selector plus context/question text boxes —
# wired to predict(), then start it.  debug=True keeps the process in the
# foreground and echoes errors to the console.
# NOTE(review): the second positional argument to Dropdown ("value") is
# the old-API `type` parameter, not a default choice — confirm intended.
app = gr.Interface(
    fn=predict,
    inputs=[
        gr.inputs.Dropdown(["m1", "m2"], "value"),
        gr.inputs.Textbox(lines=7, default=context, label="Context Paragraph"),
        gr.inputs.Textbox(lines=2, default=question, label="Question"),
    ],
    outputs=[
        gr.outputs.Textbox(label="Answer"),
        gr.outputs.Textbox(label="Score"),
    ],
)
Interface = app.launch(debug=True)