File size: 3,640 Bytes
2ab5d77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bba9d1a
 
 
 
 
2ab5d77
 
 
 
 
bba9d1a
316e205
bba9d1a
 
316e205
bba9d1a
2ab5d77
bba9d1a
2ab5d77
 
bba9d1a
2ab5d77
 
bba9d1a
2ab5d77
bba9d1a
2ab5d77
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import re

from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings

load_dotenv()

# Chat model used to answer customer questions.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")

# Embedding model shared by both vector stores.
embeddings = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004")


def _build_store(path: str) -> FAISS:
    """Load a text file, split it into overlapping chunks, and index it in FAISS.

    Deduplicates the identical load/split/index sequence previously repeated
    for products and FAQs. Chunk size/overlap match the original settings.
    """
    docs = TextLoader(path).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(docs)
    return FAISS.from_documents(documents=chunks, embedding=embeddings)


# --- Vector stores ---
product_store = _build_store("products.json")
faq_store = _build_store("faqs.json")

# --- Retrievers (top-3 chunks per query) ---
product_retriever = product_store.as_retriever(search_kwargs={"k": 3})
faq_retriever = faq_store.as_retriever(search_kwargs={"k": 3})

# --- Routing keywords ---
# Queries containing any FAQ keyword go to the FAQ store; product keywords
# (checked second) go to the product store.
FAQ_KEYWORDS = {
    "delivery", "ship", "shipping", "return", "refund", "warranty",
    "payment", "pay", "exchange", "order", "track", "policy"
}
PRODUCT_KEYWORDS = {
    "price", "spec", "specs", "specifications", "feature", "features",
    "compare", "details", "model", "brand", "laptop", "mobile",
    "phone", "shoes", "camera", "ram", "ssd", "storage", "gpu", "cpu"
}

# --- Conversation history ---
# List of {"user": ..., "bot": ...} dicts, appended to by ask_bot.
# NOTE(review): grows without bound for long sessions; only the last 6
# entries are ever read.
conversation_history = []

# --- Functions ---
def get_relevant_retriever(query: str):
    """Route *query* to the FAQ or product retriever by keyword match.

    Matches whole words only: the original substring test (``word in q``)
    mis-routed queries — e.g. "display" matched the FAQ keyword "pay",
    "program" matched "ram", and "border" matched "order". FAQ keywords
    take priority; unmatched queries default to the product retriever.
    """
    # Extract lowercase word tokens so keyword checks are whole-word.
    words = set(re.findall(r"[a-z]+", query.lower()))
    if words & FAQ_KEYWORDS:
        return faq_retriever
    if words & PRODUCT_KEYWORDS:
        return product_retriever
    return product_retriever  # default when nothing matched

def ask_bot(query: str):
    """Answer *query* grounded in retrieved store data and recent history.

    Routes the query to the appropriate retriever, builds a prompt from the
    retrieved chunks plus the last 3 user/bot turns, invokes the LLM, records
    the exchange in ``conversation_history``, and returns the reply text.
    Returns a canned fallback when retrieval yields no context.
    """
    retriever = get_relevant_retriever(query)
    # NOTE(review): get_relevant_documents is deprecated in newer langchain
    # releases in favor of retriever.invoke(query) — confirm installed version
    # before switching.
    docs = retriever.get_relevant_documents(query)
    context = "\n".join(d.page_content for d in docs)

    # Bail out early rather than let the model hallucinate an answer.
    # (Leftover debug print of the full context removed.)
    if not context.strip():
        return ("I don't have specific information about that product in our "
                "database. Please contact support for current pricing and "
                "availability.")

    # Include up to the last 3 user/bot pairs so follow-ups stay coherent.
    history_text = "".join(
        f"User: {turn['user']}\nBot: {turn['bot']}\n"
        for turn in conversation_history[-6:]
    )

    full_prompt = f"""CRITICAL: You must ONLY use the data provided below. DO NOT use any pre-trained promotional responses.

If the context data below contains the requested product information, provide it directly.
If the context data does not contain the information, say "I don't have that information available."

DO NOT EVER SAY: "Amazing offers", "Weekly deals", "Check our deals page", or any promotional content.

Previous conversation:
{history_text}

Data from our store:
{context}

Customer question: {query}

Answer using ONLY the above store data:"""

    response = llm.invoke(full_prompt)

    # Persist this turn so later calls can use it as context.
    conversation_history.append({"user": query, "bot": response.content})

    return response.content

# --- Main Loop ---
if __name__ == "__main__":
    print("Chatbot started. Type 'exit' or 'quit' to stop.\n")
    while True:
        try:
            query = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / closed stdin / Ctrl-C instead of
            # dumping a traceback.
            break
        if query.lower() in ["exit", "quit", "q"]:
            break
        answer = ask_bot(query)
        print("Bot:", answer)