from langchain.prompts import ChatPromptTemplate
from operator import itemgetter
from langchain_core.output_parsers import StrOutputParser
from langchain import hub
from helper import get_llm, get_retriever
def format_qa_pair(question, answer):
    """Render one question/answer pair as a 'Question: .../Answer: ...' text block."""
    pair = f"Question: {question}\nAnswer: {answer}\n\n"
    return pair.strip()
def retrieve_and_rag(question, prompt_rag, sub_question_generator_chain, link):
    """Decompose `question` into sub-questions and answer each one independently.

    Args:
        question: The original user question to decompose.
        prompt_rag: Prompt template expecting "context" and "question" inputs.
        sub_question_generator_chain: Runnable mapping {"question": ...} to a
            list of sub-question strings.
        link: URL handed to ``get_retriever`` to build the document retriever.

    Returns:
        Tuple ``(rag_results, sub_questions)`` — the answer strings and the
        sub-questions they answer, index-aligned.
    """
    # Decompose the input question into independent sub-questions.
    sub_questions = sub_question_generator_chain.invoke({"question": question})

    retriever = get_retriever(link)
    llm = get_llm()
    # Build the RAG chain once; it is identical for every sub-question.
    rag_chain = prompt_rag | llm | StrOutputParser()

    rag_results = []
    for sub_question in sub_questions:
        # `.invoke()` is the current retriever API; `get_relevant_documents`
        # is deprecated in langchain-core.
        retrieved_docs = retriever.invoke(sub_question)
        # Answer each sub-question against only its own retrieved context.
        answer = rag_chain.invoke({"context": retrieved_docs,
                                   "question": sub_question})
        rag_results.append(answer)
    return rag_results, sub_questions
def get_answer_using_query_decomposition(link: str, question: str):
    """Answer `question` by decomposing it and answering sub-questions in sequence.

    Each sub-question is answered with retrieved context plus the Q+A pairs
    accumulated from the sub-questions answered before it, so the final answer
    has seen the full chain of intermediate reasoning.

    Args:
        link: URL handed to ``get_retriever`` to build the document retriever.
        question: The user question to answer.

    Returns:
        The answer string for the last sub-question (which incorporates all
        earlier Q+A pairs). Empty string if decomposition yields no questions.
    """
    # --- Step 1: decomposition — ask the LLM for 3 search sub-queries. ---
    template = """You are a helpful assistant that generates multiple sub-questions related to an input question. \n
The goal is to break down the input into a set of sub-problems / sub-questions that can be answered in isolation. \n
Generate multiple search queries related to: {question} \n
Output (3 queries):"""
    prompt_decomposition = ChatPromptTemplate.from_template(template)
    llm = get_llm()
    generate_queries_decomposition = (
        prompt_decomposition
        | llm
        | StrOutputParser()
        | (lambda x: x.split("\n"))  # one sub-question per output line
    )
    questions = generate_queries_decomposition.invoke({"question": question})

    # --- Step 2: answer each sub-question, feeding forward prior Q+A pairs. ---
    template = """Here is the question you need to answer:
\n --- \n {question} \n --- \n
Here is any available background question + answer pairs:
\n --- \n {q_a_pairs} \n --- \n
Here is additional context relevant to the question:
\n --- \n {context} \n --- \n
Use the above context and any background question + answer pairs to answer the question: \n {question}
"""
    decomposition_prompt = ChatPromptTemplate.from_template(template)
    retriever = get_retriever(link)

    # The chain is identical on every iteration — build it once, outside the loop.
    rag_chain = (
        {
            "context": itemgetter("question") | retriever,
            "question": itemgetter("question"),
            "q_a_pairs": itemgetter("q_a_pairs"),
        }
        | decomposition_prompt
        | llm
        | StrOutputParser()
    )

    q_a_pairs = ""
    answer = ""
    for q in questions:
        answer = rag_chain.invoke({"question": q, "q_a_pairs": q_a_pairs})
        # Fold this Q+A pair into the background context for the next one.
        q_a_pairs = q_a_pairs + "\n---\n" + format_qa_pair(q, answer)

    # BUG FIX: the original discarded the loop's accumulated work, re-ran RAG
    # over every sub-question a second time via retrieve_and_rag (doubling the
    # LLM and retriever calls), and returned only the FIRST sub-question's
    # answer. Return the final answer, which has seen all prior Q+A pairs.
    return answer
# if __name__ == "__main__":
#     link = "https://lilianweng.github.io/posts/2023-06-23-agent/"
#     question = "What is task decomposition for LLM agents?"
#     answer = get_answer_using_query_decomposition(link, question)
#     print(answer)