# RAGTechniquesComparisonTool / QueryDecomposition.py
# Source: uploaded by DeathBlade020 ("Upload 8 files", commit 6c044be verified)
from langchain.prompts import ChatPromptTemplate
from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain import hub
from helper import get_llm, get_retriever
def format_qa_pair(question, answer):
    """Render a question/answer pair as a two-line 'Question: .. / Answer: ..' block."""
    # Trailing blank lines are stripped so callers control the separator.
    return f"Question: {question}\nAnswer: {answer}\n\n".strip()
def retrieve_and_rag(question, prompt_rag, sub_question_generator_chain, link):
    """Answer each decomposed sub-question independently with its own RAG pass.

    Args:
        question: The original user question to decompose.
        prompt_rag: Prompt template expecting ``context`` and ``question`` keys.
        sub_question_generator_chain: Runnable that maps
            ``{"question": ...}`` to a list of sub-question strings.
        link: URL passed to ``get_retriever`` to build the document retriever.

    Returns:
        Tuple ``(rag_results, sub_questions)``: one answer string per
        generated sub-question, plus the sub-questions themselves.
    """
    # Decompose the input question into independent sub-questions.
    sub_questions = sub_question_generator_chain.invoke({"question": question})

    retriever = get_retriever(link)
    llm = get_llm()
    # Hoisted out of the loop: the chain is identical for every sub-question.
    answer_chain = prompt_rag | llm | StrOutputParser()

    rag_results = []
    for sub_question in sub_questions:
        # Retriever.invoke replaces the deprecated get_relevant_documents
        # (deprecated in langchain-core 0.1.46, removed in 1.0).
        retrieved_docs = retriever.invoke(sub_question)
        answer = answer_chain.invoke(
            {"context": retrieved_docs, "question": sub_question}
        )
        rag_results.append(answer)
    return rag_results, sub_questions
def get_answer_using_query_decomposition(link: str, question: str):
    """Answer ``question`` about the page at ``link`` via query decomposition.

    The question is decomposed into three sub-questions; each is answered in
    turn with retrieved context plus all previously answered Q&A pairs, so
    later answers build on earlier ones. The answer to the last sub-question
    — the only one produced with the full background — is returned.

    Args:
        link: URL of the document to retrieve context from.
        question: The user's question.

    Returns:
        The final recursively-built answer string (empty string if the
        decomposition step yields no sub-questions).
    """
    # --- Step 1: decompose the question into sub-questions. ---
    template = """You are a helpful assistant that generates multiple sub-questions related to an input question. \n
The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
Generate multiple search queries related to: {question} \n
Output (3 queries):"""
    prompt_decomposition = ChatPromptTemplate.from_template(template)
    llm = get_llm()
    generate_queries_decomposition = (
        prompt_decomposition
        | llm
        | StrOutputParser()
        | (lambda x: x.split("\n"))  # one sub-question per output line
    )
    questions = generate_queries_decomposition.invoke({"question": question})

    # --- Step 2: answer sub-questions recursively, threading Q&A pairs. ---
    template = """Here is the question you need to answer:
\n --- \n {question} \n --- \n
Here is any available background question + answer pairs:
\n --- \n {q_a_pairs} \n --- \n
Here is additional context relevant to the question:
\n --- \n {context} \n --- \n
Use the above context and any background question + answer pairs to answer the question: \n {question}
"""
    decomposition_prompt = ChatPromptTemplate.from_template(template)
    retriever = get_retriever(link)

    # Hoisted out of the loop: the chain itself is loop-invariant; only the
    # invoke() inputs change per sub-question.
    rag_chain = (
        {
            "context": itemgetter("question") | retriever,
            "question": itemgetter("question"),
            "q_a_pairs": itemgetter("q_a_pairs"),
        }
        | decomposition_prompt
        | llm
        | StrOutputParser()
    )

    q_a_pairs = ""
    answer = ""
    for q in questions:
        answer = rag_chain.invoke({"question": q, "q_a_pairs": q_a_pairs})
        # Accumulate background so the next sub-question sees prior answers.
        q_a_pairs = q_a_pairs + "\n---\n" + format_qa_pair(q, answer)

    # BUG FIX: the original discarded the recursive result above, ran a
    # second independent RAG pass (retrieve_and_rag) and returned only the
    # answer to the FIRST sub-question — not an answer to the user's
    # question. Return the final recursive answer instead; it is the only
    # one built with every earlier Q&A pair as background.
    return answer
# if __name__ == "__main__":
#     link = "https://lilianweng.github.io/posts/2023-06-23-agent/"
#     question = "What is task decomposition for LLM agents?"
#     answer = get_answer_using_query_decomposition(link, question)
#     print(answer)