# --- Archived from an earlier version: bulk Q&A over an uploaded question bank.
# --- This flow is NOT wired into the app below; it is kept for reference and
# --- assumes the `username` and conversational chain defined further down.
# # Step 3: File upload for question bank
# st.subheader("Upload your question bank (PDF or DOC):")
# uploaded_file = st.file_uploader("Choose a file", type=["pdf", "doc", "docx"])
# if uploaded_file:
#     # Process the uploaded file
#     with st.spinner("Reading and processing your question bank..."):
#         import docx2txt
#         from PyPDF2 import PdfReader
#
#         # Extract one question per non-empty line of the file
#         def extract_questions(file):
#             if file.name.endswith(".pdf"):
#                 reader = PdfReader(file)
#                 # extract_text() can return None for image-only pages
#                 text = "\n".join([(page.extract_text() or "") for page in reader.pages])
#             elif file.name.endswith((".doc", ".docx")):
#                 text = docx2txt.process(file)
#             else:
#                 text = ""
#             return text.strip().split("\n")
#
#         questions = extract_questions(uploaded_file)
#         # Generate answers using the LLM
#         answers = []
#         for question in questions:
#             if question.strip():
#                 response = st.session_state.conversational_chain.invoke({"question": question})
#                 answers.append({"question": question, "answer": response["answer"]})
#         # Save Q&A to a file
#         output_file_path = f"question_answers_{username}.txt"
#         with open(output_file_path, "w") as f:
#             for qa in answers:
#                 f.write(f"Q: {qa['question']}\nA: {qa['answer']}\n\n")
#         st.success("All questions have been answered and saved!")
#         # Provide download link
#         with open(output_file_path, "rb") as f:
#             st.download_button(
#                 label="Download Q&A File",
#                 data=f,
#                 file_name=output_file_path,
#                 mime="text/plain"
#             )
# NOTE: the archived upload flow above worked in the earlier version, but it
# is not wired into the current app below.

import os
import json
from datetime import datetime

import streamlit as st
from langchain_chroma import Chroma
from langchain_groq import ChatGroq
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

# Reuse the embedding model that built the index, so query vectors match it
from vectorize_documents import embeddings
working_dir = os.path.dirname(os.path.abspath(__file__))

# Load the Groq API key from config.json and expose it to the SDK
with open(os.path.join(working_dir, "config.json")) as f:
    config_data = json.load(f)
GROQ_API_KEY = config_data["GROQ_API_KEY"]
os.environ["GROQ_API_KEY"] = GROQ_API_KEY
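# Illustrative shape of config.json (the key value here is a placeholder):
# {
#     "GROQ_API_KEY": "gsk_..."
# }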
# Ensure the chat-history JSON file exists
chat_history_file = "chat_histories.json"
if not os.path.exists(chat_history_file):
    with open(chat_history_file, "w") as f:
        json.dump({}, f)

# Functions to handle chat history
def load_chat_history():
    with open(chat_history_file, "r") as f:
        return json.load(f)

def save_chat_history(chat_histories):
    with open(chat_history_file, "w") as f:
        json.dump(chat_histories, f, indent=4)
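# For reference, the on-disk layout these helpers read and write looks like
# this (names and values are illustrative):
# {
#     "Alice": [
#         {"timestamp": "2024-01-01 12:00:00", "user": "...", "assistant": "..."}
#     ]
# }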
# Function to set up the vectorstore over the pre-built Chroma index
def setup_vectorstore():
    # Use the same embeddings that vectorize_documents used to build the
    # index; instantiating a different embedding model here would make the
    # query vectors incomparable with the stored ones.
    vectorstore = Chroma(persist_directory="vector_db_dir_notes_ai",
                         embedding_function=embeddings)
    return vectorstore
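# For context, a minimal sketch of how a persisted index like
# "vector_db_dir_notes_ai" is typically built. The real logic lives in
# vectorize_documents.py; the docs directory, loader, and chunk sizes below
# are assumptions for illustration, and this function is never called here.
def rebuild_vectorstore_sketch(docs_dir="data"):
    from langchain_community.document_loaders import DirectoryLoader
    from langchain_text_splitters import RecursiveCharacterTextSplitter

    documents = DirectoryLoader(docs_dir).load()  # raw notes/PDFs (assumed path)
    chunks = RecursiveCharacterTextSplitter(
        chunk_size=1000, chunk_overlap=100
    ).split_documents(documents)  # retrieval-sized pieces
    # Embed and persist with the same embeddings used at query time
    return Chroma.from_documents(chunks, embedding=embeddings,
                                 persist_directory="vector_db_dir_notes_ai")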
# Function to set up the chatbot chain
def chat_chain(vectorstore):
    llm = ChatGroq(
        model="llama-3.1-70b-versatile",
        temperature=0
    )
    retriever = vectorstore.as_retriever()
    # Buffer memory stores the raw turn-by-turn history and takes no LLM.
    # output_key="answer" tells it which chain output to record, since the
    # chain also returns source documents.
    memory = ConversationBufferMemory(
        output_key="answer",
        memory_key="chat_history",
        return_messages=True
    )
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=retriever,
        chain_type="stuff",  # stuff all retrieved chunks into one prompt
        memory=memory,
        verbose=True,
        return_source_documents=True
    )
    return chain
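# Example of how the chain is invoked (the question here is hypothetical):
# result = chain.invoke({"question": "What is a B+ tree?"})
# result["answer"]            -> the generated reply
# result["source_documents"]  -> the retrieved chunks behind it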
# Streamlit UI
st.set_page_config(
    page_title="Notes.AI",
    page_icon="🤖",  # page_icon expects a single emoji or an image URL
    layout="centered"
)
st.title("🤖 Notes.AI")
st.subheader("Hey! Here you can search CSE 7th Sem notes and read PYQ answers too!")
# Step 1: Input user's name
if "username" not in st.session_state:
    username = st.text_input("Enter your name to proceed:")
    if username:
        with st.spinner("Loading chatbot interface... Please wait."):
            st.session_state.username = username
            st.session_state.chat_history = []  # Initialize empty chat history
            st.session_state.vectorstore = setup_vectorstore()
            st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
            st.success(f"Welcome, {username}! The chatbot interface is ready.")
else:
    username = st.session_state.username

# Step 2: Initialize components if not already set
if "conversational_chain" not in st.session_state:
    st.session_state.vectorstore = setup_vectorstore()
    st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
# Step 3: Show chatbot interface
if "username" in st.session_state:
    st.subheader(f"Hello {username}, start your query below!")

    # Display existing chat history dynamically
    for message in st.session_state.chat_history:
        if message["role"] == "user":
            with st.chat_message("user"):
                st.markdown(message["content"])
        elif message["role"] == "assistant":
            with st.chat_message("assistant"):
                st.markdown(message["content"])

    # User input section
    user_input = st.chat_input("Ask AI....")
    if user_input:
        with st.spinner("Processing your query... Please wait."):
            # Save user input to session state
            st.session_state.chat_history.append({"role": "user", "content": user_input})

            # Display user's message
            with st.chat_message("user"):
                st.markdown(user_input)

            # Get assistant's response
            with st.chat_message("assistant"):
                response = st.session_state.conversational_chain.invoke({"question": user_input})
                assistant_response = response["answer"]
                st.markdown(assistant_response)

            # Save assistant's response to session state
            st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})

            # Save chat history to file with a timestamp
            chat_histories = load_chat_history()
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if username not in chat_histories:
                chat_histories[username] = []
            chat_histories[username].append({
                "timestamp": timestamp,
                "user": user_input,
                "assistant": assistant_response
            })
            save_chat_history(chat_histories)
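# To launch the app locally (assuming this file is saved as main.py; adjust
# the filename to match yours):
#     streamlit run main.py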