import os
from operator import itemgetter

from dotenv import load_dotenv
from langchain.load import dumps, loads
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

from helper import get_retriever

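# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file.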
load_dotenv()


def reciprocal_rank_fusion(results: list[list], k: int = 60):
    """Fuse multiple lists of ranked documents into a single ranking using
    Reciprocal Rank Fusion; `k` is the smoothing constant in the RRF
    formula 1 / (rank + k)."""

    fused_scores = {}

    for docs in results:
        for rank, doc in enumerate(docs):
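            # Serialize the document so it can be used as a dictionary key.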
            doc_str = dumps(doc)
            if doc_str not in fused_scores:
                fused_scores[doc_str] = 0
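            # Each appearance contributes 1 / (rank + k) to the fused score,
            # so documents ranked highly across several lists rise to the top.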
            fused_scores[doc_str] += 1 / (rank + k)

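    # Sort by fused score, highest first, and deserialize the documents.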
    reranked_results = [
        (loads(doc), score)
        for doc, score in sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    ]

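    # Print the fused ranking so the reranking can be inspected.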
    for i, (doc, score) in enumerate(reranked_results):
        print(f"Rank {i+1} - Score: {score:.4f}")

    return reranked_results


def get_answer_using_rag_fusion(link: str, question: str):
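    # Step 1: have the LLM rewrite the user's question into four related
    # search queries (the query-generation half of RAG-Fusion).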
| template = """You are a helpful assistant that generates multiple search queries based on a single input query. \n
|
| Generate multiple search queries related to: {question} \n
|
| Output (4 queries):"""
|
| prompt_rag_fusion = ChatPromptTemplate.from_template(template)

    generate_queries = (
        prompt_rag_fusion
        | ChatOpenAI(temperature=0)
        | StrOutputParser()
        | (lambda x: x.split("\n"))
    )

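    # Step 2: run every generated query through the retriever and merge the
    # per-query ranked results with reciprocal rank fusion.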
    retriever = get_retriever(link)
    retrieval_chain_rag_fusion = generate_queries | retriever.map() | reciprocal_rank_fusion
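    # Invoked here only to print the fused ranking; the final chain below
    # re-runs the retrieval step itself when it is invoked.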
    docs = retrieval_chain_rag_fusion.invoke({"question": question})

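    # Step 3: answer the original question with the fused documents as context.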
| template = """Answer the following question based on this context:
|
|
|
| {context}
|
|
|
| Question: {question}
|
| """
|
|
|
    prompt = ChatPromptTemplate.from_template(template)
    llm = ChatOpenAI(temperature=0)

    final_rag_chain = (
        {"context": retrieval_chain_rag_fusion,
         "question": itemgetter("question")}
        | prompt
        | llm
        | StrOutputParser()
    )

    response = final_rag_chain.invoke({"question": question})
    return response
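

# A minimal usage sketch (the URL and question are hypothetical placeholders,
# not from the original code); assumes `helper.get_retriever` builds a
# retriever over the given link and that OPENAI_API_KEY is set.
if __name__ == "__main__":
    answer = get_answer_using_rag_fusion(
        "https://example.com/some-article",
        "What are the main points of this article?",
    )
    print(answer)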