File size: 2,567 Bytes
3a84497
427d8fa
3a84497
 
2d86f45
427d8fa
3a84497
427d8fa
 
3a84497
427d8fa
 
 
 
 
 
fa3db56
427d8fa
 
 
fa3db56
 
 
 
 
 
427d8fa
fa3db56
3a84497
 
427d8fa
2d86f45
427d8fa
 
 
 
2d86f45
427d8fa
 
2d86f45
427d8fa
 
 
2d86f45
427d8fa
2d86f45
427d8fa
2d86f45
427d8fa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2d86f45
427d8fa
2d86f45
427d8fa
2d86f45
 
 
427d8fa
 
 
 
2d86f45
292480c
427d8fa
292480c
427d8fa
 
292480c
 
2d86f45
427d8fa
292480c
 
427d8fa
292480c
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import os
import requests
import streamlit as st
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import PyPDFLoader
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from groq import Groq

# Initialize Groq client
# NOTE(review): os.environ.get returns None when GROQ_API_KEY is unset —
# presumably the Groq client rejects a missing key at construction or first
# call; confirm, since there is no explicit check here.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))


class GroqLLM:
    """Minimal wrapper exposing the Groq chat API through LangChain's
    ``_call`` / ``_llm_type`` LLM interface, backed by the module-level
    ``client``.

    NOTE(review): this class does not subclass ``langchain.llms.base.LLM``,
    so ``RetrievalQA.from_chain_type`` may reject it during pydantic
    validation — confirm against the installed LangChain version and
    subclass ``LLM`` if needed.
    """

    def _call(self, prompt: str, stop=None) -> str:
        """Send *prompt* as a single user message and return the reply text.

        Args:
            prompt: Fully formatted prompt string.
            stop: Optional stop sequences. Bug fix: this was previously
                accepted but silently ignored; it is now forwarded to the
                Groq API so generation actually halts at the sequences.

        Returns:
            The content of the first completion choice.
        """
        response = client.chat.completions.create(
            messages=[
                {"role": "user", "content": prompt},
            ],
            model="llama-3.3-70b-versatile",
            stop=stop,
        )
        return response.choices[0].message.content

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses when logging/serializing this LLM."""
        return "Groq"


# Title of the application
st.title("PPRA Rules 2004 - Q&A Application")


@st.cache_resource
def _build_qa_chain():
    """Download the PPRA Rules PDF, index it, and build the RetrievalQA chain.

    Cached with ``st.cache_resource`` so the expensive download / embedding /
    FAISS-index work runs once per server process instead of on every
    Streamlit rerun (Streamlit re-executes the whole script per interaction).

    Returns:
        A configured RetrievalQA chain that also returns source documents.
    """
    pdf_url = "https://drive.google.com/uc?id=1faNpSV_UIZzd3h08qtzvSRGmzDkNtmuA"
    pdf_path = "ppra_rules_2004.pdf"

    pdf_response = requests.get(pdf_url, timeout=60)
    # Fail fast on a bad download instead of silently indexing an error page.
    pdf_response.raise_for_status()
    with open(pdf_path, "wb") as f:
        f.write(pdf_response.content)

    # Load the PDF document
    documents = PyPDFLoader(pdf_path).load()

    # Initialize the embeddings and vectorstore
    embeddings = HuggingFaceEmbeddings()
    vectorstore = FAISS.from_documents(documents, embeddings)
    retriever = vectorstore.as_retriever()

    # Bug fix: RetrievalQA's "stuff" prompt receives the retrieved docs as
    # {context} and the user's question as {question} — the variable must be
    # named "question", not "query", or prompt formatting fails.
    prompt_template = """
You are an AI assistant tasked with answering questions about the Public Procurement Rules, 2004 (PPRA Rules) in Pakistan.
Use the provided context to answer the user's question as accurately as possible.

Context:
{context}

Question:
{question}

Answer:
"""
    qa_prompt = PromptTemplate(
        input_variables=["context", "question"], template=prompt_template
    )

    # Bug fix: from_chain_type forwards prompt overrides via
    # chain_type_kwargs; "combine_documents_chain_kwargs" is not a valid
    # argument and raises a validation error.
    return RetrievalQA.from_chain_type(
        llm=GroqLLM(),
        retriever=retriever,
        chain_type_kwargs={"prompt": qa_prompt},
        return_source_documents=True,
    )


qa_chain = _build_qa_chain()

# User interaction: question box plus answer/source display.
st.subheader("Ask Questions About PPRA Rules 2004")
user_query = st.text_input("Enter your question:")

if user_query:
    try:
        # Send the question through the retrieval-QA pipeline.
        result = qa_chain({"query": user_query})

        st.subheader("Answer:")
        st.write(result["result"])

        # Show the retrieved passages that grounded the answer.
        st.subheader("Relevant Sources:")
        for source_doc in result["source_documents"]:
            # Truncate each passage to keep the page readable.
            st.write(source_doc.page_content[:500])
    except Exception as exc:
        # Top-level UI boundary: surface the failure instead of crashing.
        st.error(f"An error occurred: {str(exc)}")