import os

from langchain.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI

# Directory produced by FAISS.save_local(); it holds index.faiss + index.pkl.
FAISS_INDEX_PATH = "financial_faiss_index"


def load_vector_store(api_key):
    """Load the persisted FAISS vector store, or return None if absent.

    Args:
        api_key: Google API key for the embedding client. Must use the same
            embedding model ("models/embedding-001") the index was built with,
            otherwise similarity search results will be meaningless.

    Returns:
        A FAISS vector store, or None when no precomputed index exists on disk.
    """
    # BUG FIX: FAISS.save_local() writes a *directory* named FAISS_INDEX_PATH
    # (containing index.faiss / index.pkl), never a flat "<name>.faiss" file.
    # The old check `os.path.exists(f"{FAISS_INDEX_PATH}.faiss")` was always
    # False, so the store could never be loaded.
    if not os.path.isdir(FAISS_INDEX_PATH):
        return None

    embeddings = GoogleGenerativeAIEmbeddings(
        model="models/embedding-001", google_api_key=api_key
    )
    # allow_dangerous_deserialization is required because FAISS metadata is
    # pickled; acceptable here since the index is produced locally by our own
    # precompute step, not downloaded from an untrusted source.
    return FAISS.load_local(
        FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True
    )


def query_chatbot(question, api_key):
    """Answer a user question via FAISS retrieval + Gemini ("stuff" QA chain).

    Args:
        question: Natural-language question from the user.
        api_key: Google API key used for both embeddings and the chat model.

    Returns:
        The model's answer text, or a warning string when no precomputed
        FAISS index is available on disk.
    """
    vector_store = load_vector_store(api_key)
    if not vector_store:
        return "⚠️ No precomputed financial data found. Please run 'Precompute' first."

    # Retrieve the documents most similar to the question; these become the
    # {context} stuffed into the prompt below.
    docs = vector_store.similarity_search(question)

    # Define chatbot prompt
    prompt_template = """
    You are a financial expert specializing in banking, RBI regulations, fraud detection, and stock trends.
    Answer the question based on the given financial documents.

    Context:\n {context}\n
    Question: \n{question}\n

    Answer:
    """

    model = ChatGoogleGenerativeAI(
        model="gemini-2.0-flash-exp", temperature=0.3, google_api_key=api_key
    )
    prompt = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )

    # "stuff" chain: concatenates all retrieved docs into a single prompt.
    chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
    response = chain(
        {"input_documents": docs, "question": question}, return_only_outputs=True
    )
    return response["output_text"]