File size: 1,992 Bytes
d0840c7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19e1445
d0840c7
 
19e1445
d0840c7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b2333d3
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import os
import streamlit as st 
from agents import  SearchAgent
from langchain.vectorstores import FAISS
from langchain_google_genai import GoogleGenerativeAIEmbeddings
from config.config import model



            
# Google Generative AI embedding model; must match the model used when the
# FAISS index was built, or similarity search results will be meaningless.
# NOTE(review): requires GOOGLE_API_KEY in the environment — confirm config.
embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")

class QAAgent:
    """RAG agent that answers questions about academic papers.

    Retrieves relevant chunks from a local FAISS vector store, combines them
    with recent chat history, and asks the configured LLM for an answer.
    """

    def __init__(self):
        # Shared LLM handle from config; used to generate the final answer.
        self.model = model
        # Prompt template with chat_history / context / question placeholders.
        self.prompt = """You are a research assistant answering questions about academic papers. Use the following context from papers and chat history to provide accurate, specific answers.

        Previous conversation:
        {chat_history}

        Paper context:
        {context}

        Question: {question}

        Guidelines:
        1. Reference specific papers when making claims
        2. Use direct quotes when relevant
        3. Acknowledge if information isn't available in the provided context
        4. Maintain academic tone and precision
        """
        # Populated elsewhere (presumably by a search step); returned as-is.
        self.papers = None
        # Optional upstream SearchAgent output, prepended to the prompt.
        self.search_agent_response = ""

    def solve(self, query):
        """Answer *query* using retrieved paper context and recent chat history.

        Args:
            query: The user's question as a string.

        Returns:
            Tuple of (answer_text, papers) where papers is whatever
            ``self.papers`` currently holds (``None`` unless set externally).
        """
        # Load the persisted FAISS index.
        # NOTE(review): allow_dangerous_deserialization=True unpickles the
        # stored index — acceptable only because the index is built locally;
        # never point this at an untrusted file.
        vector_db = FAISS.load_local(
            "vector_db",
            embeddings,
            index_name="base_and_adjacent",
            allow_dangerous_deserialization=True,
        )

        # Last 5 chat turns, one per line. (Bug fix: previously joined with
        # "" so consecutive messages ran together in the prompt.)
        chat_history = st.session_state.get("chat_history", [])
        chat_history_text = "\n".join(
            f"{sender}: {msg}" for sender, msg in chat_history[-5:]
        )

        # Retrieve relevant chunks and format them with their provenance.
        # (Bug fixes: chunks were joined with no separator, and a chunk
        # missing 'source' metadata raised KeyError.)
        retrieved = vector_db.as_retriever().get_relevant_documents(query)
        context = "\n\n".join(
            f"{doc.page_content}\n Source: {doc.metadata.get('source', 'unknown')}"
            for doc in retrieved
        )

        # Fill the template and prepend any prior search-agent findings.
        full_prompt = self.prompt.format(
            chat_history=chat_history_text,
            context=context,
            question=query,
        )

        response = self.model.generate_content(str(self.search_agent_response) + full_prompt)
        return response.text, self.papers