Spaces: Build error
Update app.py
app.py CHANGED
@@ -1,93 +1,94 @@
 from sentence_transformers import SentenceTransformer
 import pandas as pd
 import numpy as np
 import faiss
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_core.prompts import ChatPromptTemplate
 from langchain_core.output_parsers import StrOutputParser
 import gradio as gr
 import os
 
 model = SentenceTransformer("all-MiniLM-L6-v2")
 df = pd.read_csv("Final_faqs.csv")
 df["question_plus_answer"] = "Question : " + df["Question"] + " , Answer : " + df["Answer"]
 
 def generate_embeddings(text):
     embed = model.encode(text)
     embed = embed / np.linalg.norm(embed)
     return embed
 
 embeddings = df['question_plus_answer'].apply(generate_embeddings).tolist()
 embeddings_array = np.array(embeddings).astype(np.float32)
 dimensions = embeddings_array.shape[1]
 
 index = faiss.IndexFlatIP(dimensions)
 index.add(embeddings_array)
 
 faiss.write_index(index, "question_answer_embeddings_cosine.index")
 loaded_index = faiss.read_index("question_answer_embeddings_cosine.index")
 
 def search_similar_questions(query, k=3):
 
     # Generate and normalize query embedding
     query_embedding = generate_embeddings(query)
     query_embedding = np.array([query_embedding]).astype('float32')
 
     # Search in FAISS index (results will be in cosine similarity order)
     similarities, indices = loaded_index.search(query_embedding, k)
 
     # Return results (similarities will be between 0 and 1)
     return df['question_plus_answer'][indices[0][0]]
 
 
 api_key = os.environ.get("GEMINI_API_KEY")
 if api_key is None:
     raise ValueError("The GEMINI_API_KEY environment variable is not set.")
 
 gemini_model = ChatGoogleGenerativeAI(model='gemini-1.5-flash' , google_api_key=api_key)
 
 parser = StrOutputParser()
 
 from langchain.prompts import ChatPromptTemplate
 
 
 chat_template = ChatPromptTemplate.from_messages([
     ("system", """You are a helpful and concise assistant trained on a set of Frequently Asked Questions (FAQs).
 Your goal is to answer user questions based on the most relevant FAQ or let the user know when no direct match is found.
 
 Rules:
 1. If the user greets (e.g., "hi", "hello", "hey", "buddy", "bro", "how are you", etc.), reply with a warm, friendly message and DO NOT reference FAQ data.
 2. If a relevant FAQ is found, use it to answer the user’s question directly but not exactly same as retrieved query , add something by yourself.
 3. If no FAQ matches the query then show the closest retrieved content.
 
 {retrieved_context}
 """),
 
     ("human", """User Query:
 {query}
 
 Your Response:""")
 ])
 chain = chat_template | gemini_model | parser
 
-
-
-
-
-
-retrieved = search_similar_questions(message)
-response = chain.invoke({'retrieved_context': retrieved, 'query': message})
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Chatbot logic
+def chatbot_fn(message, history):
+    if message.lower().strip() in ["hi", "hello", "hey", "how are you", "bro", "buddy"]:
+        return "Hey there! 👋 I'm your friendly assistant. Ask me anything from the FAQs!"
+
+    retrieved = search_similar_questions(message)
+    response = chain.invoke({'retrieved_context': retrieved, 'query': message})
+
+    return f"📄 **Retrieved FAQ Snippet:**\n{retrieved}\n\n🤖 **Answer:**\n{response}"
+
+# Launch real chatbot UI
+chatbot = gr.ChatInterface(
+    fn=chatbot_fn,
+    title="🤖 FAQ Chatbot",
+    theme="soft",
+    examples=["tell me something about meesho .", "What is the price of i-phone 15 pro 128 gb?", "Can I open the FiftyOne App in a browser?.", "Hey !"],
+    chatbot=gr.Chatbot(show_label=False, avatar_images=("🧑", "🤖")),
+    description="Ask your questions and get instant answers based on FAQs. Try saying 'hi' or ask about a process!",
+)
+
+if __name__ == "__main__":
+    chatbot.launch(share=True)
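
Notes on this change:

The previous revision ran retrieval at import time: lines 78-79 of the old file called search_similar_questions(message) and chain.invoke(...) at module level, where no variable named message exists, so the Space crashed with a NameError on startup. That is the most plausible source of the "Build error" status shown above. This update moves both calls inside chatbot_fn and adds a gr.ChatInterface front end around them. One leftover survives unchanged: line 51 re-imports ChatPromptTemplate from langchain.prompts, which is the same class line 6 already imports from langchain_core.prompts (the langchain package re-exports it), so that line can simply be dropped.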
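
Encoding speed: the startup path embeds the CSV one row at a time via df.apply, and generate_embeddings normalizes each vector by hand. SentenceTransformer.encode accepts a whole list of texts and has a normalize_embeddings flag that does the same normalization in one batched call, which is much faster on a CPU Space. A minimal sketch using the names already defined in app.py:

# Sketch: batch-encode every FAQ row in one call instead of row-by-row apply.
# normalize_embeddings=True unit-normalizes the vectors, so the inner product
# computed by IndexFlatIP is exactly cosine similarity.
texts = df["question_plus_answer"].tolist()
embeddings_array = model.encode(texts, normalize_embeddings=True).astype(np.float32)

index = faiss.IndexFlatIP(embeddings_array.shape[1])
index.add(embeddings_array)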
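
Index rebuilds: the app writes question_answer_embeddings_cosine.index and immediately reads it back, so every restart pays the full encoding cost and the file on disk is never actually reused. A hedged sketch that only builds when the file is missing (note that a Space's filesystem is ephemeral unless persistent storage is enabled, so this mainly helps local runs and warm restarts):

INDEX_PATH = "question_answer_embeddings_cosine.index"

if os.path.exists(INDEX_PATH):
    # Reuse the index from a previous run instead of re-encoding every FAQ.
    loaded_index = faiss.read_index(INDEX_PATH)
else:
    embeddings_array = model.encode(
        df["question_plus_answer"].tolist(), normalize_embeddings=True
    ).astype(np.float32)
    loaded_index = faiss.IndexFlatIP(embeddings_array.shape[1])
    loaded_index.add(embeddings_array)
    faiss.write_index(loaded_index, INDEX_PATH)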
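
Retrieval depth: search_similar_questions asks FAISS for k=3 neighbours but returns only indices[0][0] and throws the scores away, so rule 3 of the system prompt ("no FAQ matches") can never actually fire, and the inline comment is slightly off: cosine similarity of unit vectors lies in [-1, 1], not [0, 1]. A hedged variant that returns every hit above a cutoff; the 0.5 threshold and the function name are illustrative, not tuned or taken from the file:

def search_similar_questions_topk(query, k=3, min_similarity=0.5):
    # Encode and normalize the query, then search the loaded index.
    query_embedding = np.array([generate_embeddings(query)]).astype("float32")
    similarities, indices = loaded_index.search(query_embedding, k)

    # Keep only confident hits; scores are cosine similarities in [-1, 1].
    hits = [
        df["question_plus_answer"][idx]
        for score, idx in zip(similarities[0], indices[0])
        if score >= min_similarity
    ]
    # Returning None lets the caller (and prompt rule 3) handle "no match".
    return "\n".join(hits) if hits else None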
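
Two smaller points on the new UI code. First, the greeting check is an exact string match, so "Hey!" or "hello there" falls through to retrieval; a small normalization step (sketch below, is_greeting is a hypothetical helper, not part of the commit) catches those variants:

import string

GREETINGS = {"hi", "hello", "hey", "how are you", "bro", "buddy"}

def is_greeting(message):
    # Lower-case and strip surrounding punctuation/whitespace so that
    # "Hey!" and "hello there" still count as greetings.
    cleaned = message.lower().strip(string.punctuation + string.whitespace)
    if not cleaned:
        return False
    return cleaned in GREETINGS or cleaned.split()[0] in GREETINGS

Second, gr.Chatbot's avatar_images parameter expects image file paths or URLs, so the emoji tuple ("🧑", "🤖") may not render, or may raise, depending on the Gradio version, and share=True is redundant on Spaces, which already serve the app at a public URL; plain chatbot.launch() should behave the same there. Both are worth checking if the build status stays red after this commit.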
|