import os
import json
from datetime import datetime

import streamlit as st
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_groq import ChatGroq
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

from vectorize_documents import embeddings

working_dir = os.path.dirname(os.path.abspath(__file__))
with open(f"{working_dir}/config.json") as config_file:
    config_data = json.load(config_file)
GROQ_API_KEY = config_data["GROQ_API_KEY"]
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
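# config.json is expected to hold the Groq API key, roughly like this
# (the value below is a placeholder, not a real key):
# {
#     "GROQ_API_KEY": "gsk_..."
# }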
# Ensure the chat history JSON file exists
chat_history_file = "chat_histories.json"
if not os.path.exists(chat_history_file):
    with open(chat_history_file, "w") as f:
        json.dump({}, f)


# Functions to handle chat history
def load_chat_history():
    with open(chat_history_file, "r") as f:
        return json.load(f)


def save_chat_history(chat_histories):
    with open(chat_history_file, "w") as f:
        json.dump(chat_histories, f, indent=4)
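# chat_histories.json ends up keyed by username (illustrative values shown):
# {
#     "Alice": [
#         {"timestamp": "2024-01-01 10:00:00", "user": "...", "assistant": "..."}
#     ]
# }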
# Function to set up vector store
def setup_vectorstore():
    embeddings = HuggingFaceEmbeddings()
    vectorstore = Chroma(
        persist_directory="vector_db_dir_notes_ai",
        embedding_function=embeddings
    )
    return vectorstore
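# Note: "vector_db_dir_notes_ai" is assumed to already contain a persisted Chroma
# collection (built beforehand, e.g. by vectorize_documents.py); this function only
# loads it, it does not create the embeddings.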
# Function to set up chatbot chain
def chat_chain(vectorstore):
    llm = ChatGroq(
        model="llama-3.1-70b-versatile",
        temperature=0
    )
    retriever = vectorstore.as_retriever()
    memory = ConversationBufferMemory(
        llm=llm,
        output_key="answer",
        memory_key="chat_history",
        return_messages=True
    )
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        verbose=True,
        return_source_documents=True
    )
    return chain
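# Because output_key="answer" and return_source_documents=True, calling the chain
# returns a dict containing "answer" and "source_documents". A minimal usage sketch
# (the query string below is hypothetical):
#     result = chain({"question": "Summarize unit 1 of the notes"})
#     print(result["answer"])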
# Streamlit UI
st.set_page_config(
    page_title="Notes.AI",
    page_icon="🤖",
    layout="centered"
)

st.title("🤖 Notes.AI")
st.subheader("Hey! Here you can search the CSE 7th Sem notes. Read notes and PYQ answers too!")
# Step 1: Input user's name
if "username" not in st.session_state:
    username = st.text_input("Enter your name to proceed:")
    if username:
        with st.spinner("Loading chatbot interface... Please wait."):
            st.session_state.username = username
            st.session_state.chat_history = []  # Initialize empty chat history
            st.session_state.vectorstore = setup_vectorstore()
            st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
            st.success(f"Welcome, {username}! The chatbot interface is ready.")
else:
    username = st.session_state.username

# Step 2: Initialize components if not already set
if "conversational_chain" not in st.session_state:
    st.session_state.vectorstore = setup_vectorstore()
    st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
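# Note: Streamlit reruns this script on every interaction, so st.session_state is
# used to keep the vector store and chain alive across reruns instead of rebuilding them.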
# Step 3: Show chatbot interface
if "username" in st.session_state:
    st.subheader(f"Hello {username}, start your query below!")

    # Display existing chat history dynamically
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            with st.chat_message("user"):
                st.markdown(message["content"])
        elif message["role"] == "assistant":
            with st.chat_message("assistant"):
                st.markdown(message["content"])

    # User input section
    user_input = st.chat_input("Ask AI...")
    if user_input:
        with st.spinner("Processing your query... Please wait."):
            # Save user input to session state
            st.session_state.chat_history.append({"role": "user", "content": user_input})

            # Display user's message
            with st.chat_message("user"):
                st.markdown(user_input)

            # Get assistant's response
            with st.chat_message("assistant"):
                response = st.session_state.conversational_chain({"question": user_input})
                assistant_response = response["answer"]
                st.markdown(assistant_response)

            # Save assistant's response to session state
            st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})

            # Save chat history to file with timestamp
            chat_histories = load_chat_history()
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if username not in chat_histories:
                chat_histories[username] = []
            chat_histories[username].append({
                "timestamp": timestamp,
                "user": user_input,
                "assistant": assistant_response
            })
            save_chat_history(chat_histories)
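# To run the app locally (assuming this file is saved as main.py):
#     streamlit run main.py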