File size: 1,635 Bytes
1feb3e6
8d75349
440fca8
61ec036
1feb3e6
 
 
 
cda7d7b
1feb3e6
440fca8
 
1feb3e6
8d75349
61ec036
1feb3e6
440fca8
 
1feb3e6
61ec036
1feb3e6
 
61ec036
 
1feb3e6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# components/pdf_chat.py

import os

import streamlit as st
from langchain_core.messages import AIMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq

from utils.retriever import load_vectorstore


def _format_docs(docs) -> str:
    """Join retrieved Document page contents into one clean context string.

    Without this, the raw ``list[Document]`` repr (page_content + metadata
    noise) would be stringified straight into the prompt.
    """
    return "\n\n".join(doc.page_content for doc in docs)


def run_pdf_qa():
    """Render the 'Ask Your PDF' page: take a question, run RAG over the
    uploaded PDF's vectorstore, and display the model's answer.

    Expects ``st.session_state.pdf_path`` to have been set by the upload
    page; shows a warning and returns early otherwise.
    """
    st.header("💬 Ask Your PDF")

    # Guard clause: nothing to query until a PDF has been uploaded.
    if "pdf_path" not in st.session_state:
        st.warning("⚠️ Please upload a PDF first in the 'Upload Business Docs' section.")
        return

    # os.path.basename instead of split('/'): correct on Windows paths too.
    st.markdown(f"**Using file:** `{os.path.basename(st.session_state.pdf_path)}`")

    # User Question Input
    question = st.text_input("❓ Ask a question based on your uploaded PDF", placeholder="e.g. What is the business model?")

    if not question:
        return

    # Load vectorstore retriever for the current PDF.
    retriever = load_vectorstore(st.session_state.pdf_path)

    # Prompt and Model
    prompt = ChatPromptTemplate.from_messages([
        ("system", "You are a helpful assistant. Use the retrieved context to answer the user's question."),
        ("human", "Context:\n{context}\n\nQuestion: {question}")
    ])
    # Groq model ids are lowercase ("llama3-8b-8192"); the mixed-case
    # original relied on case-insensitive matching.
    model = ChatGroq(temperature=0.2, model_name="llama3-8b-8192")

    # Standard LCEL RAG shape: the question fans out to the retriever
    # (formatted into context text) and passes through unchanged.
    rag_chain = (
        {"context": retriever | _format_docs, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )

    with st.spinner("Thinking..."):
        response = rag_chain.invoke(question)

    # Display Answer
    st.markdown("### 💡 Answer:")
    st.write(response)