omarbayoumi2 commited on
Commit
55cdbf4
·
verified ·
1 Parent(s): d2926f6

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -0
app.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Imports: HF pipeline factory + Gradio UI toolkit.
from transformers import pipeline
import gradio as gr

# 1) Load your fine-tuned QA model from the Hub
MODEL_ID = "omarbayoumi2/bert-base-qa-squad-colab"

# Extractive question-answering pipeline backed by the checkpoint above;
# the same repo id supplies both the model weights and its tokenizer.
qa = pipeline("question-answering", model=MODEL_ID, tokenizer=MODEL_ID)
12
+
13
# 2) Inference function
def answer(question: str, context: str) -> str:
    """Extract the answer to *question* from *context* with the QA pipeline.

    Parameters
    ----------
    question : str
        The question to answer.
    context : str
        The passage the answer span should be extracted from.

    Returns
    -------
    str
        The extracted answer span, or a prompt asking the user to supply
        both inputs when either is missing.
    """
    # Guard against None, empty, AND whitespace-only inputs — the original
    # truthiness check let "   " through to the model, which would return a
    # meaningless span. `(x or "")` keeps None safe before .strip().
    if not (question or "").strip() or not (context or "").strip():
        return "Please provide both a question and a context."
    result = qa(question=question, context=context)
    # result is a dict: {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
    return result["answer"]
20
+
21
# 3) Build Gradio interface: two text inputs (question + context paragraph)
# mapped through `answer` to a single text output.
question_box = gr.Textbox(
    label="Question",
    placeholder="Ask a question about the context...",
)
context_box = gr.Textbox(
    label="Context",
    lines=8,
    placeholder="Paste the context paragraph here...",
)

iface = gr.Interface(
    fn=answer,
    inputs=[question_box, context_box],
    outputs=gr.Textbox(label="Answer"),
    title="BERT-base SQuAD QA Demo",
    description=(
        "Fine-tuned `bert-base-uncased` on SQuAD v1.1.\n"
        "Model: omarbayoumi2/bert-base-qa-squad-colab"
    ),
)

# Launch the app only when run as a script, not when imported as a module.
if __name__ == "__main__":
    iface.launch()