# my-doc-rag/utils/load_rag_chain.py
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from .load_llm import LLM_MODEL


def format_docs(docs):
    # Join the retrieved Documents into one context string for the prompt,
    # instead of letting the raw list repr leak into {context}.
    return "\n\n".join(doc.page_content for doc in docs)


def generate_response_from_context(retriever, question: str):
    prompt = ChatPromptTemplate.from_messages([
        ("human", "Answer this using context:\n{context}\n\nQuestion:\n{question}")
    ])
    # The question feeds both branches: the retriever supplies {context},
    # while RunnablePassthrough forwards it unchanged into {question}.
    rag_chain = {"context": retriever | format_docs, "question": RunnablePassthrough()} | prompt | LLM_MODEL
    return rag_chain.invoke(question).content
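

# A minimal usage sketch, not part of the original module: it wires the chain
# to a FAISS vector store over OpenAI embeddings purely for illustration. The
# toy corpus, FAISS, and OpenAIEmbeddings are assumptions here; this repo does
# not pin a particular retriever backend.
if __name__ == "__main__":
    from langchain_community.vectorstores import FAISS
    from langchain_openai import OpenAIEmbeddings

    # Build a tiny in-memory index so the retriever has something to search.
    store = FAISS.from_texts(
        ["FAISS performs fast vector similarity search."],  # toy corpus
        embedding=OpenAIEmbeddings(),
    )
    retriever = store.as_retriever()
    print(generate_response_from_context(retriever, "What does FAISS do?"))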