Spaces:
Sleeping
Sleeping
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from .load_llm import LLM_MODEL
def generate_response_from_context(retriever, question: str):
    """Answer *question* with a minimal RAG chain.

    The retriever output is bound to the ``{context}`` slot of the prompt,
    the raw question is passed through to ``{question}``, and the filled
    prompt is piped into the configured model.

    Args:
        retriever: A LangChain retriever runnable; invoked with *question*
            to produce the context. (NOTE(review): raw retriever output —
            typically a list of Documents — is interpolated directly into
            the prompt; confirm that rendering is intended.)
        question: The user question to answer.

    Returns:
        The ``content`` attribute (answer text) of the model's response
        message.
    """
    rag_prompt = ChatPromptTemplate.from_messages(
        [("human", "Answer this using context:\n{context}\n\nQuestion:\n{question}")]
    )
    # LCEL composition: the mapping fans the single string input out to the
    # retriever (context) and a passthrough (question), then pipes the
    # filled prompt into the LLM.
    input_map = {"context": retriever, "question": RunnablePassthrough()}
    rag_chain = input_map | rag_prompt | LLM_MODEL
    response = rag_chain.invoke(question)
    return response.content