# Retrieval-augmented customer-support chatbot: Gemini for answers,
# FAISS vector stores over products.json and faqs.json for grounding.
import re

from dotenv import load_dotenv

from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai import GoogleGenerativeAIEmbeddings
load_dotenv()  # pull GOOGLE_API_KEY (and any other config) from a local .env file

# Chat model that generates the customer-facing answers.
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash")
# Embedding model used to index products/FAQs into the FAISS stores below.
embeddings = GoogleGenerativeAIEmbeddings(model="models/text-embedding-004")
# --- Build vector stores for products and FAQs ---
def _build_vector_store(path: str) -> FAISS:
    """Load a text file, split it into overlapping chunks, and index it in FAISS.

    The products/FAQ pipelines were identical copy-paste; this helper runs the
    shared load -> split -> embed pipeline once per source file.

    Args:
        path: Path to the source file (the .json files are loaded as plain text).

    Returns:
        A FAISS vector store built from the chunked documents.
    """
    docs = TextLoader(path).load()
    # 1000-char chunks with 100-char overlap keep related fields together.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(docs)
    return FAISS.from_documents(documents=chunks, embedding=embeddings)


product_store = _build_vector_store("products.json")
faq_store = _build_vector_store("faqs.json")

# --- Retrievers: top-3 nearest chunks per query ---
product_retriever = product_store.as_retriever(search_kwargs={"k": 3})
faq_retriever = faq_store.as_retriever(search_kwargs={"k": 3})
# --- Keyword routing tables ---
# A query containing any of these words is routed to the FAQ store.
FAQ_KEYWORDS = {
    "delivery", "ship", "shipping", "return", "refund", "warranty",
    "payment", "pay", "exchange", "order", "track", "policy"
}
# These route to the product store (which is also the default when
# nothing matches — see get_relevant_retriever below).
PRODUCT_KEYWORDS = {
    "price", "spec", "specs", "specifications", "feature", "features",
    "compare", "details", "model", "brand", "laptop", "mobile",
    "phone", "shoes", "camera", "ram", "ssd", "storage", "gpu", "cpu"
}

# --- Conversation history ---
# Each entry is one full exchange: {"user": <question>, "bot": <answer>}.
# Grows for the lifetime of the process; only a recent slice is fed to the LLM.
conversation_history = []
| # --- Functions --- | |
def get_relevant_retriever(query: str):
    """Pick the retriever (FAQ vs. product) whose keywords match the query.

    Matching is done on whole words, not raw substrings: the original
    ``word in q`` check misfired on e.g. "display" (contains FAQ keyword
    "pay"), "program" (contains product keyword "ram"), and "relationship"
    (contains "ship").

    Args:
        query: Raw customer question.

    Returns:
        ``faq_retriever`` when any FAQ keyword appears as a word in the
        query; ``product_retriever`` otherwise (products are the default
        domain, so an explicit product-keyword check is unnecessary).
    """
    words = set(re.findall(r"[a-z]+", query.lower()))
    if words & FAQ_KEYWORDS:
        return faq_retriever
    # Product-keyword hits and the no-match fallback both resolve here.
    return product_retriever
def ask_bot(query: str):
    """Answer a customer query using retrieved store data plus recent history.

    Routes the query to the FAQ or product retriever, builds a grounded
    prompt from the retrieved chunks and the most recent conversation turns,
    asks the LLM, and records the new exchange in ``conversation_history``.

    Args:
        query: Raw customer question.

    Returns:
        The model's answer, or a canned fallback when retrieval finds nothing.
    """
    retriever = get_relevant_retriever(query)
    # .invoke() is the current retriever API; get_relevant_documents() is deprecated.
    docs = retriever.invoke(query)
    context = "\n".join(d.page_content for d in docs)

    # Bail out early when retrieval produced no usable context.
    if not context.strip():
        return "I don't have specific information about that product in our database. Please contact support for current pricing and availability."

    # Include only the last 6 exchanges (each history entry is one full
    # user/bot pair) to keep the prompt bounded.
    history_text = "".join(
        f"User: {turn['user']}\nBot: {turn['bot']}\n"
        for turn in conversation_history[-6:]
    )

    full_prompt = f"""CRITICAL: You must ONLY use the data provided below. DO NOT use any pre-trained promotional responses.
If the context data below contains the requested product information, provide it directly.
If the context data does not contain the information, say "I don't have that information available."
DO NOT EVER SAY: "Amazing offers", "Weekly deals", "Check our deals page", or any promotional content.
Previous conversation:
{history_text}
Data from our store:
{context}
Customer question: {query}
Answer using ONLY the above store data:"""
    response = llm.invoke(full_prompt)

    # Record this exchange so follow-up questions have context.
    conversation_history.append({"user": query, "bot": response.content})
    return response.content
# --- Main Loop ---
if __name__ == "__main__":
    print("Chatbot started. Type 'exit' or 'quit' to stop.\n")
    while True:
        try:
            query = input("You: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C at the prompt quits cleanly instead of crashing.
            break
        if query.lower() in {"exit", "quit", "q"}:
            break
        if not query.strip():
            # Skip blank lines rather than sending an empty query to the LLM.
            continue
        answer = ask_bot(query)
        print("Bot:", answer)