| | from langchain.prompts import ChatPromptTemplate
|
| | from operator import itemgetter
|
| | from langchain_core.output_parsers import StrOutputParser
|
| | from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate
|
| | from langchain_openai import ChatOpenAI
|
| | from langchain_core.runnables import RunnableLambda
|
| | from helper import get_retriever
|
| |
|
def get_answer(link: str, question: str) -> str:
    """Answer *question* about the document at *link* using step-back prompting.

    Step-back prompting first rewrites the user's specific question into a more
    generic "step-back" question, retrieves context for BOTH the original and
    the step-back question, and then answers using the combined context.

    Args:
        link: URL/identifier passed to ``get_retriever`` to build a retriever
            over the target document.
        question: The user's original question.

    Returns:
        The model's final answer as a string.

    Note:
        Requires OpenAI credentials in the environment (uses ``ChatOpenAI``)
        and performs network I/O via the retriever.
    """
    # Few-shot examples demonstrating how to abstract a specific question
    # into a broader, easier-to-answer "step-back" question.
    examples = [
        {
            "input": "Could the members of The Police perform lawful arrests?",
            "output": "what can the members of The Police do?",
        },
        {
            "input": "Jan Sindel’s was born in what country?",
            "output": "what is Jan Sindel’s personal history?",
        },
    ]
    example_prompt = ChatPromptTemplate.from_messages(
        [
            ("human", "{input}"),
            ("ai", "{output}"),
        ]
    )
    few_shot_prompt = FewShotChatMessagePromptTemplate(
        example_prompt=example_prompt,
        examples=examples,
    )
    # Prompt that turns the original question into its step-back paraphrase.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                """You are an expert at world knowledge. Your task is to step back and paraphrase a question to a more generic step-back question, which is easier to answer. Here are a few examples:""",
            ),
            few_shot_prompt,
            ("user", "{question}"),
        ]
    )

    # temperature=0 keeps the paraphrase deterministic.
    generate_queries_step_back = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
    # NOTE: the original code also invoked this chain here and discarded the
    # result — a wasted LLM call; it is invoked inside `chain` below instead.

    response_prompt_template = """You are an expert of world knowledge. I am going to ask you a question. Your response should be comprehensive and not contradicted with the following context if they are relevant. Otherwise, ignore them if they are not relevant.

# {normal_context}
# {step_back_context}

# Original Question: {question}
# Answer:"""
    response_prompt = ChatPromptTemplate.from_template(response_prompt_template)
    retriever = get_retriever(link)
    chain = (
        {
            # Context retrieved for the question exactly as asked.
            "normal_context": itemgetter("question") | retriever,
            # Context retrieved for the generated step-back paraphrase.
            "step_back_context": generate_queries_step_back | retriever,
            # Pass the original question through to the final prompt.
            "question": itemgetter("question"),
        }
        | response_prompt
        | ChatOpenAI(temperature=0)
        | StrOutputParser()
    )

    response = chain.invoke({"question": question})
    return response
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|