# Doc_rag / agent.py
# Uploaded by Abdul-Haseeb ("Upload 4 files", commit 63abe46, verified)
from groq import Groq
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
import os
from dotenv import load_dotenv
# Pull GROQ_API_KEY (and any other settings) from a local .env file into the
# process environment before the client below reads it.
load_dotenv()
# Groq chat-completions client; API key comes from the environment.
client = Groq(api_key=os.getenv('GROQ_API_KEY'))
# Sentence-transformer embedding model used for similarity search.
# NOTE(review): presumably the same model that built the on-disk index —
# confirm, or retrieval quality will silently degrade.
embeddings = HuggingFaceEmbeddings(
model_name="sentence-transformers/all-MiniLM-L6-v2"
)
# Load the prebuilt FAISS index from ./vectorstore. FAISS persistence is
# pickle-based, so allow_dangerous_deserialization=True is required; this is
# only acceptable because the index is a local, trusted artifact — never
# load an untrusted vectorstore this way.
vectorstore = FAISS.load_local(
"vectorstore",
embeddings,
allow_dangerous_deserialization=True
)
# Per-process conversation state; "summary" accumulates Q/A history and is
# appended to by engineering_agent().
conversation_state = {
"summary": ""
}
def engineering_agent(query):
    """Answer an engineering question via RAG over the local FAISS index.

    Retrieves the top-4 most similar document chunks for *query*, combines
    them with a running summary of the prior conversation, and asks the
    Groq-hosted model to answer strictly from that context.

    Parameters
    ----------
    query : str
        The user's question.

    Returns
    -------
    tuple
        ``(answer, docs)`` — the model's answer text and the list of
        retrieved LangChain ``Document`` objects.

    Side effects: appends this Q/A exchange to the module-level
    ``conversation_state["summary"]``.
    """
    # Cap on the running summary. Previously the summary grew without bound
    # (every call appended the full Q and A), so after enough turns the
    # prompt would exceed the model's context window and requests would
    # fail. Trimming to the most recent tail keeps the prompt bounded.
    # (No `global` needed: the dict is mutated in place, never rebound.)
    max_summary_chars = 4000

    docs = vectorstore.similarity_search(query, k=4)
    retrieved_context = "\n\n".join(doc.page_content for doc in docs)

    # Triple-quoted bodies stay at column 0 so the prompt text the model
    # sees carries no stray indentation.
    combined_context = f"""
Previous discussion summary:
{conversation_state['summary']}
Retrieved document context:
{retrieved_context}
"""

    prompt = f"""
You are an engineering documentation assistant.
Use ONLY the information provided below.
You may summarize or refer to earlier discussion.
Do NOT add external knowledge.
If insufficient information is available, respond:
"Not found in the documents."
{combined_context}
Question:
{query}
"""

    response = client.chat.completions.create(
        model="llama-3.1-8b-instant",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,  # low temperature: stay close to the documents
    )
    answer = response.choices[0].message.content

    # Fold this exchange into the running summary, then keep only the most
    # recent tail so the next call's prompt stays within budget.
    conversation_state["summary"] += f"\nQ: {query}\nA: {answer}"
    if len(conversation_state["summary"]) > max_summary_chars:
        conversation_state["summary"] = conversation_state["summary"][-max_summary_chars:]

    return answer, docs