"""NCERT PDF-based AI assistant: upload a PDF and chat about its contents via an LLM (Streamlit app)."""
import streamlit as st
from pypdf import PdfReader
import io
from gemini_kit import get_llm
from langchain_core.messages import HumanMessage
# Seed Streamlit session state once per browser session.
#   pdf      -- text extracted from the uploaded PDF ("" until extracted)
#   messages -- chat transcript as a list of single-key dicts
#   extract  -- True while a fresh extraction is still needed
for _key, _default in (("pdf", ""), ("messages", []), ("extract", True)):
    if _key not in st.session_state:
        st.session_state[_key] = _default
def upload_pdf():
    """Render the PDF uploader and extract the file's text into session state.

    Extraction runs once per uploaded file, guarded by the
    ``st.session_state.extract`` flag so Streamlit reruns don't re-parse
    the PDF on every interaction. Removing the file re-arms extraction.
    """
    uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")
    # Short-circuit `and` (the original used bitwise `&`, which always
    # evaluates both operands) -- only check the flag when a file exists.
    if uploaded_file is not None and st.session_state.extract:
        st.write("Waiting for pdf to be extracted ...")
        pdf_reader = PdfReader(io.BytesIO(uploaded_file.read()))
        # extract_text() may return None for image-only pages; coerce to ""
        # so the join never raises TypeError. join avoids quadratic +=.
        text = "".join((page.extract_text() or "") for page in pdf_reader.pages)
        # Persist the text and disarm extraction until the file changes.
        st.session_state.pdf = text
        st.session_state.extract = False
        st.write("PDF Text Extracted. You can chat now!!")
    if uploaded_file is None:
        # No file present (removed or never uploaded): re-arm extraction.
        st.session_state.extract = True
def chatbot_ui():
    """Render the chat input, the clear-chat button, and the transcript.

    User turns are stored as ``{"user": text}`` and assistant turns as
    ``{"Assistant": text}`` in ``st.session_state.messages``.
    """
    prompt = st.text_input("You: ", "")
    if prompt:
        st.session_state.messages.append({"user": prompt})
        # Only answer from the PDF once one has been extracted.
        if st.session_state.pdf:
            reply = generate_response(st.session_state.pdf, prompt)
        else:
            reply = "Please upload a PDF to get started."
        st.session_state.messages.append({"Assistant": reply})
    # Clearing happens before rendering, so the wipe is visible immediately.
    if st.button("Clear Chat"):
        st.session_state.messages = []
    for entry in st.session_state.messages:
        user_text = entry.get("user")
        if user_text is not None:
            st.markdown(f"**You:** {user_text}")
        else:
            st.markdown(f"**Assistant:** {entry['Assistant']}")
def generate_response(pdf, user_input):
    """Answer ``user_input`` with the LLM, using ``pdf`` text as context.

    Returns the model's reply, or a human-readable error string if the
    call fails (e.g. quota exhausted or context too large).
    """
    prompt = f"This is the text extracted from the pdf: {pdf}. The user query is {user_input}."
    model = get_llm()
    try:
        return model.invoke(prompt).content
    except Exception as e:
        # Best-effort: surface the failure to the user instead of crashing.
        return "Error occurred. This might be due to exhaustion of LLM quota or your PDF might be much bigger. The exact error: " + str(e)
def main():
    """Lay out the page: title, then the upload and chat sections in order."""
    st.title("NCERT PDF Based AI Assistant")
    # Each section is a (header label, renderer) pair, drawn top to bottom.
    for heading, render_section in (("Upload a PDF", upload_pdf), ("Chatbot", chatbot_ui)):
        st.header(heading)
        render_section()


if __name__ == "__main__":
    main()
|