Geethika Isuru Sampath committed on
Commit
5777026
·
1 Parent(s): dfb517b

NewWithAPI

Browse files
Files changed (2) hide show
  1. app.py +22 -6
  2. requirements.txt +4 -1
app.py CHANGED
@@ -1,16 +1,32 @@
1
  import gradio as gr
2
- from transformers import pipeline
 
 
3
 
4
- # Load the question-answering model
5
- qa_pipeline = pipeline("question-answering", model="deepset/roberta-base-squad2", tokenizer="deepset/roberta-base-squad2")
 
 
 
6
 
7
  # Initialize an empty list to store conversation history
8
  conversation_history = []
9
 
 
 
 
 
10
  def answer_question(question, context):
11
- # Use the model to get the answer
12
- result = qa_pipeline(question=question, context=context)
13
- answer = result['answer']
 
 
 
 
 
 
 
14
 
15
  # Add the Q&A to the conversation history
16
  conversation_history.append(f"Human: {question}")
 
1
import gradio as gr
import requests
import os
from dotenv import load_dotenv

# Pull HUGGINGFACE_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# Hugging Face Inference API endpoint for the extractive-QA model.
API_URL = "https://api-inference.huggingface.co/models/deepset/roberta-base-squad2"

# Fail fast with an actionable message instead of an opaque KeyError at
# import time when the key is missing from the environment.
_api_key = os.environ.get("HUGGINGFACE_API_KEY")
if _api_key is None:
    raise RuntimeError(
        "HUGGINGFACE_API_KEY is not set; add it to your environment or .env file"
    )
headers = {"Authorization": f"Bearer {_api_key}"}

# Running transcript of the chat; entries look like "Human: <question>"
# (appended by answer_question below).
conversation_history = []
14
 
15
def query(payload, *, timeout=30):
    """POST *payload* as JSON to the HF Inference API and return the decoded JSON.

    Args:
        payload: JSON-serializable request body (e.g. {"inputs": {...}}).
        timeout: Seconds to wait for the API before giving up. A finite
            default prevents a stalled request from hanging the app forever
            (requests has no timeout by default).

    Returns:
        The API response parsed from JSON (dict or list, as the API returns).

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    return response.json()
18
+
19
  def answer_question(question, context):
20
+ # Use the Hugging Face Inference API to get the answer
21
+ output = query({
22
+ "inputs": {
23
+ "question": question,
24
+ "context": context
25
+ },
26
+ })
27
+
28
+ # Extract the answer from the API response
29
+ answer = output.get('answer', 'Sorry, I couldn\'t find an answer.')
30
 
31
  # Add the Q&A to the conversation history
32
  conversation_history.append(f"Human: {question}")
requirements.txt CHANGED
@@ -1 +1,4 @@
1
- huggingface_hub==0.22.2
 
 
 
 
1
+ huggingface_hub==0.22.2
2
+ gradio
3
+ requests
4
+ python-dotenv