brainsqueeze committed on
Commit
f002446
·
verified ·
1 Parent(s): b917db3

Delete ask_candid/tools/question_reformulation.py

Browse files
ask_candid/tools/question_reformulation.py DELETED
@@ -1,68 +0,0 @@
1
- from langchain_core.prompts import ChatPromptTemplate
2
- from langchain_core.output_parsers import StrOutputParser
3
- from langchain_core.language_models.llms import LLM
4
-
5
- from ask_candid.agents.schema import AgentState
6
-
7
-
8
def reformulate_question_using_history(
    state: AgentState,
    llm: LLM,
    focus_on_recommendations: bool = False
) -> AgentState:
    """Transform the query to produce a better query with details from previous messages and emphasize aspects important
    for recommendations if needed.

    Parameters
    ----------
    state : AgentState
        The current state
    llm : LLM
        Model used to rewrite the latest user message
    focus_on_recommendations : bool, optional
        Flag to determine if the reformulation should emphasize recommendation-relevant aspects such as geographies,
        cause areas, etc., by default False

    Returns
    -------
    AgentState
        The updated state: ``messages`` carries the (possibly reformulated) question and ``user_input`` the
        original question text
    """

    print("---REFORMULATE THE USER INPUT---")
    messages = state["messages"]
    question = messages[-1].content

    # Only reformulate when there is real history beyond the system message
    # and the latest user turn (equivalent to the original
    # `len(messages[:-1]) > 1` check, written more directly).
    if len(messages) > 2:
        if focus_on_recommendations:
            prompt_text = """Given a chat history and the latest user input which might reference context in the chat
history, especially geographic locations, cause areas and/or population groups, formulate a standalone input
which can be understood without the chat history.
Chat history: ```{chat_history}```
User input: ```{question}```

Reformulate the question without adding implications or assumptions about the user's needs or intentions.
Focus solely on clarifying any contextual details present in the original input."""
        else:
            prompt_text = """Given a chat history and the latest user input which might reference context in the chat
history, formulate a standalone input which can be understood without the chat history. Include hints as to
what the user is getting at given the context in the chat history.
Chat history: ```{chat_history}```
User input: ```{question}```

Do NOT answer the question, just reformulate it if needed and otherwise return it as is.
"""

        # BUG FIX: the original built the prompt with ("human", question),
        # which makes the raw user text itself a template string — any literal
        # `{` or `}` in the input was parsed as a placeholder and crashed at
        # format time. Bind it through the `{question}` variable instead (it is
        # already supplied to `.invoke` below); rendered content is identical
        # for brace-free input.
        contextualize_q_prompt = ChatPromptTemplate([
            ("system", prompt_text),
            ("human", "{question}"),
        ])

        rag_chain = contextualize_q_prompt | llm | StrOutputParser()
        new_question = rag_chain.invoke({
            # Skip the system message; serialize each turn as "ROLE: content".
            "chat_history": '\n'.join(f"{m.type.upper()}: {m.content}" for m in messages[1:]),
            "question": question
        })
        print(f"user asked: '{question}', agent reformulated the question basing on the chat history: {new_question}")
        return {"messages": [new_question], "user_input": question}
    # No usable history: pass the question through unchanged.
    return {"messages": [question], "user_input": question}