import os
import faiss
import pickle
import streamlit as st
from groq import Groq
from sentence_transformers import SentenceTransformer

# Initialize the Groq client for the Llama LLM. The API key is read from the
# environment so that no secret is hard-coded into source control.
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
if not GROQ_API_KEY:
    raise RuntimeError("Set the GROQ_API_KEY environment variable before running the app.")
client = Groq(api_key=GROQ_API_KEY)
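
# How "faiss_index.bin" and "metadata.pkl" might be produced: a minimal,
# hypothetical offline build step (not invoked by this app; the function name
# and signature are illustrative only). It assumes the source documents have
# already been split into a list of text chunks.
def build_faiss_index(chunks, index_path="faiss_index.bin", metadata_path="metadata.pkl"):
    model = SentenceTransformer("all-MiniLM-L6-v2")  # 384-dimensional embeddings
    vectors = model.encode(chunks)                   # float32 array of shape (n, 384)
    index = faiss.IndexFlatL2(vectors.shape[1])      # exact L2 search, no training step
    index.add(vectors)
    faiss.write_index(index, index_path)
    with open(metadata_path, "wb") as f:
        pickle.dump(list(chunks), f)                 # metadata[i] is the text behind vector i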

# Streamlit page configuration (Streamlit expects this to be the first
# Streamlit call in the script)
st.set_page_config(page_title="RAG Chatbot: Healthcare, Education & Finance", layout="wide")

# Load the FAISS index and its metadata (the text chunk behind each vector).
# st.cache_resource keeps both in memory across Streamlit reruns, so they are
# not reloaded from disk on every user interaction.
@st.cache_resource
def load_faiss_index(index_path, metadata_path):
    if not os.path.exists(index_path) or not os.path.exists(metadata_path):
        raise FileNotFoundError("FAISS index or metadata file not found!")
    index = faiss.read_index(index_path)
    with open(metadata_path, "rb") as f:
        metadata = pickle.load(f)
    return index, metadata

# Initialize the SentenceTransformer used for embedding, cached for the same reason
@st.cache_resource
def load_embedder():
    return SentenceTransformer("all-MiniLM-L6-v2")

index, metadata = load_faiss_index("faiss_index.bin", "metadata.pkl")
embedder = load_embedder()

st.title("🤖 RAG Chatbot: Healthcare, Education & Finance")
st.markdown("Welcome to the **AI Chatbot**! Ask anything about **healthcare**, **education**, or **finance**.")
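
# Sanity check (added sketch): the embedder's output dimensionality must match
# the index, or every search/add below will fail. all-MiniLM-L6-v2 emits
# 384-dimensional vectors; index.d is the dimension FAISS was built with.
assert embedder.get_sentence_embedding_dimension() == index.d, (
    "Embedding dimension does not match the FAISS index dimension"
)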

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Input field for user queries
user_input = st.text_input("💬 Type your message:", placeholder="Ask me anything...")

# Function to generate Llama LLM responses using Groq API
def generate_llama_response(user_query, context):
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are an expert assistant in Healthcare, Education, and Finance."},
            {"role": "user", "content": f"Context: {context}\n\nQuery: {user_query}"}
        ],
        model="llama-3.3-70b-versatile",
        stream=False,
    )
    return response.choices[0].message.content
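
# Optional variant (untested sketch, not wired into the app below): the Groq
# SDK also supports token streaming. With stream=True the call returns an
# iterator of chunks whose incremental text lives in choices[0].delta.content;
# Streamlit could render it progressively via st.write_stream.
def generate_llama_response_stream(user_query, context):
    stream = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "You are an expert assistant in Healthcare, Education, and Finance."},
            {"role": "user", "content": f"Context: {context}\n\nQuery: {user_query}"},
        ],
        model="llama-3.3-70b-versatile",
        stream=True,
    )
    for chunk in stream:
        yield chunk.choices[0].delta.content or ""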

# Handle a user query: retrieve context from FAISS, then ask the LLM
def handle_query(user_query):
    # Embed the user query and search FAISS for the 3 nearest chunks.
    # FAISS pads results with -1 when the index holds fewer than k vectors,
    # so those slots are filtered out.
    query_vector = embedder.encode([user_query])
    _, top_indices = index.search(query_vector, k=3)
    relevant_contexts = [metadata[i] for i in top_indices[0] if i != -1]

    # Combine the retrieved chunks into a single context string
    context = "\n".join(relevant_contexts)

    # Generate a response from the Llama LLM
    response = generate_llama_response(user_query, context)

    # Store the user query in the index as well (reusing the embedding computed
    # above instead of re-encoding it) and persist the updated index and
    # metadata to disk. Note that stored queries become retrievable context
    # for later questions.
    index.add(query_vector)
    metadata.append(user_query)
    faiss.write_index(index, "faiss_index.bin")
    with open("metadata.pkl", "wb") as f:
        pickle.dump(metadata, f)

    return response

# Process user input
if user_input:
    # Display user message
    st.session_state.messages.append({"role": "user", "content": user_input})
    
    # Get bot response
    bot_response = handle_query(user_input)
    st.session_state.messages.append({"role": "assistant", "content": bot_response})

# Display chat messages
for message in st.session_state.messages:
    if message["role"] == "user":
        st.markdown(f"**You:** {message['content']}")
    else:
        st.markdown(f"**Bot:** {message['content']}")