Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+from huggingface_hub import InferenceClient
 import os
 import sqlite3
 import requests
@@ -7,20 +8,14 @@ import numpy as np
 from sentence_transformers import SentenceTransformer
 import gradio as gr

-# Configure Hugging Face API
-
-    "Meta-Llama-3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct",
-    "Meta-Llama-3-8B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B-Instruct",
-    "Gemma-2-27B-IT": "https://api-inference.huggingface.co/models/google/gemma-2-27b-it",
-    "Gemma-2-27B": "https://api-inference.huggingface.co/models/google/gemma-2-27b"
-}
-
+# Configure Hugging Face API URL and headers
+model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
 huggingface_api_key = os.getenv("HUGGINGFACE_API_KEY")
 headers = {"Authorization": f"Bearer {huggingface_api_key}"}

 # Function to query Hugging Face model
-def query_huggingface(
-    response = requests.post(
+def query_huggingface(payload):
+    response = requests.post(f"https://api-inference.huggingface.co/models/{model_name}", headers=headers, json=payload)
     return response.json()

 # Function to extract text from PDF
@@ -90,10 +85,10 @@ model = SentenceTransformer('all-MiniLM-L6-v2')
 faiss_index, context_list = update_faiss_index()

 # Gradio interface for chatbot
-def chatbot(
+def chatbot(question):
     relevant_contexts = retrieve_relevant_context(faiss_index, context_list, question)
     user_input = f"question: {question} context: {' '.join(relevant_contexts)}"
-    response = query_huggingface(
+    response = query_huggingface({"inputs": user_input})
     response_text = response[0].get("generated_text", "Sorry, I couldn't generate a response.") if isinstance(response, list) else response.get("generated_text", "Sorry, I couldn't generate a response.")
     return response_text

@@ -108,7 +103,7 @@ def upload_pdf(file):
 # Gradio interface
 iface = gr.Interface(
     fn=chatbot,
-    inputs=
+    inputs=gr.Textbox(),
     outputs=gr.Textbox(),
     title="Storage Warehouse Customer Service Chatbot"
 )
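One detail worth noting: the commit adds `from huggingface_hub import InferenceClient` at the top of app.py, but the hunks shown still call the serverless Inference API directly through requests.post. As a rough sketch only (reusing the model name and HUGGINGFACE_API_KEY environment variable that appear in the diff, and assuming a client-based helper is the intended direction of the new import), the same query helper could be written with InferenceClient instead:

from huggingface_hub import InferenceClient
import os

# Hypothetical alternative to the requests.post helper committed above.
model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
client = InferenceClient(model=model_name, token=os.getenv("HUGGINGFACE_API_KEY"))

def query_huggingface(payload):
    # text_generation returns the generated string directly, so the
    # [{"generated_text": ...}] unpacking done in chatbot() would not be needed.
    return client.text_generation(payload["inputs"], max_new_tokens=256)

If query_huggingface returned a plain string like this, chatbot() could drop the isinstance check and return the result as-is; as committed, the requests.post version keeps the original list-of-dicts response handling.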