carolinacon committed on
Commit
5813885
·
1 Parent(s): 8073bab

Preprocessing of the initial question

Browse files
Files changed (7) hide show
  1. app.py +7 -4
  2. config/prompts.yaml +11 -1
  3. core/agent.py +15 -6
  4. core/edges.py +0 -0
  5. core/state.py +1 -0
  6. nodes/nodes.py +20 -5
  7. utils/prompt_manager.py +1 -0
app.py CHANGED
@@ -3,6 +3,8 @@ import gradio as gr
3
  import requests
4
  import pandas as pd
5
 
 
 
6
  # (Keep Constants as is)
7
  # --- Constants ---
8
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
@@ -10,13 +12,14 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
  # --- Basic Agent Definition ---
11
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
12
  class BasicAgent:
 
13
  def __init__(self):
14
- print("BasicAgent initialized.")
15
  def __call__(self, question: str) -> str:
16
  print(f"Agent received question (first 50 chars): {question[:50]}...")
17
- fixed_answer = "This is a default answer."
18
- print(f"Agent returning fixed answer: {fixed_answer}")
19
- return fixed_answer
20
 
21
  def run_and_submit_all( profile: gr.OAuthProfile | None):
22
  """
 
3
  import requests
4
  import pandas as pd
5
 
6
+ from core.agent import GaiaAgent
7
+
8
  # (Keep Constants as is)
9
  # --- Constants ---
10
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 
12
  # --- Basic Agent Definition ---
13
  # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
14
  class BasicAgent:
15
+ agent: GaiaAgent
16
  def __init__(self):
17
+ self.agent = GaiaAgent()
18
  def __call__(self, question: str) -> str:
19
  print(f"Agent received question (first 50 chars): {question[:50]}...")
20
+ answer = self.agent.__call__(question)
21
+ print(f"Agent returning fixed answer: {answer}")
22
+ return answer
23
 
24
  def run_and_submit_all( profile: gr.OAuthProfile | None):
25
  """
config/prompts.yaml CHANGED
@@ -85,4 +85,14 @@ prompts:
85
  type: memory_optimization
86
  variables: ["summary"]
87
  version: 1.0
88
- description: "Prompt for summarization and memory optimization"
 
 
 
 
 
 
 
 
 
 
 
85
  type: memory_optimization
86
  variables: ["summary"]
87
  version: 1.0
88
+ description: "Prompt for summarization and memory optimization"
89
+ question_evaluation:
90
+ content: |
91
+ You are a general AI assistant. You are given with a question. Evaluate this question and answer with a YES if the question involves the analysis of
92
+ an attached file or video link, otherwise with a NO.
93
+ This is the question to be evaluated
94
+ {{question}}
95
+ type: question_refinement
96
+ variables: ["question"]
97
+ version: 1.0
98
+ description: "Prompt for evaluating a question"
core/agent.py CHANGED
@@ -1,9 +1,9 @@
1
  from langchain_core.messages import HumanMessage
2
  from langgraph.graph.state import CompiledStateGraph
3
 
4
-
5
  from core.state import State
6
- from nodes.nodes import assistant, optimize_memory, response_processing
7
  from tools.tavily_tools import llm_tools
8
 
9
  from langgraph.graph import START, StateGraph, END
@@ -19,13 +19,20 @@ class GaiaAgent:
19
  builder = StateGraph(State)
20
 
21
  # Define nodes: these do the work
 
22
  builder.add_node("assistant", assistant)
23
  builder.add_node("tools", ToolNode(llm_tools))
24
  builder.add_node("optimize_memory", optimize_memory)
25
  builder.add_node("response_processing", response_processing)
26
 
27
  # Define edges: these determine how the control flow moves
28
- builder.add_edge(START, "assistant")
 
 
 
 
 
 
29
  builder.add_conditional_edges(
30
  "assistant",
31
  # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools If the
@@ -33,6 +40,7 @@ class GaiaAgent:
33
  # response_processing
34
  tools_condition, {"tools": "tools", "__end__": "response_processing"}
35
  )
 
36
  builder.add_edge("tools", "optimize_memory")
37
  builder.add_edge("optimize_memory", "assistant")
38
  builder.add_edge("response_processing", END)
@@ -41,10 +49,11 @@ class GaiaAgent:
41
  def __call__(self, question: str) -> str:
42
  messages = [HumanMessage(content=question)]
43
  messages = self.react_graph.invoke({"messages": messages})
44
- for m in messages['messages']:
45
- m.pretty_print()
46
 
47
- return m
 
48
 
49
  def __streamed_call__(self, question: str) -> str:
50
  # Test the web agent
 
1
  from langchain_core.messages import HumanMessage
2
  from langgraph.graph.state import CompiledStateGraph
3
 
4
+ from core.edges import file_condition
5
  from core.state import State
6
+ from nodes.nodes import assistant, optimize_memory, response_processing, orchestrator
7
  from tools.tavily_tools import llm_tools
8
 
9
  from langgraph.graph import START, StateGraph, END
 
19
  builder = StateGraph(State)
20
 
21
  # Define nodes: these do the work
22
+ builder.add_node("orchestrator", orchestrator)
23
  builder.add_node("assistant", assistant)
24
  builder.add_node("tools", ToolNode(llm_tools))
25
  builder.add_node("optimize_memory", optimize_memory)
26
  builder.add_node("response_processing", response_processing)
27
 
28
  # Define edges: these determine how the control flow moves
29
+ builder.add_edge(START, "orchestrator")
30
+ builder.add_conditional_edges("orchestrator",
31
+ # If the question involves a file processing -> file_condition routes to the END state
32
+ # If the question does not involve a file processing -> tools_condition routes to
33
+ # assistant
34
+ file_condition)
35
+
36
  builder.add_conditional_edges(
37
  "assistant",
38
  # If the latest message (result) from assistant is a tool call -> tools_condition routes to tools If the
 
40
  # response_processing
41
  tools_condition, {"tools": "tools", "__end__": "response_processing"}
42
  )
43
+
44
  builder.add_edge("tools", "optimize_memory")
45
  builder.add_edge("optimize_memory", "assistant")
46
  builder.add_edge("response_processing", END)
 
49
  def __call__(self, question: str) -> str:
50
  messages = [HumanMessage(content=question)]
51
  messages = self.react_graph.invoke({"messages": messages})
52
+ # for m in messages['messages']:
53
+ # m.pretty_print()
54
 
55
+ answer = messages['messages'][-1].content
56
+ return answer
57
 
58
  def __streamed_call__(self, question: str) -> str:
59
  # Test the web agent
core/edges.py ADDED
File without changes
core/state.py CHANGED
@@ -4,3 +4,4 @@ from langgraph.graph import MessagesState
4
  class State(MessagesState):
5
  summary: str
6
  question: str
 
 
4
  class State(MessagesState):
5
  summary: str
6
  question: str
7
+ attachment: str
nodes/nodes.py CHANGED
@@ -13,6 +13,23 @@ response_processing_model = ChatOpenAI(model="gpt-4.1-mini")
13
  model = model.bind_tools(llm_tools, parallel_tool_calls=False)
14
 
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  # Node
17
  def assistant(state: State):
18
  # set up the question
@@ -22,7 +39,7 @@ def assistant(state: State):
22
  # Get original question if it exists
23
  question = state.get("question", "")
24
  if not question:
25
- question = state["messages"][0]
26
 
27
  sys_msg = SystemMessage(content=prompt_mgmt.render_template("base_system", {"summary": summary}))
28
  try:
@@ -39,9 +56,7 @@ def assistant(state: State):
39
  def response_processing(state: State):
40
  question = state.get("question", "")
41
  answer = state["messages"][-1]
42
- print("Question:", question.content)
43
- print("Answer:", answer.content)
44
- gaia_messages = [HumanMessage(content=question.content), AIMessage(content=answer.content)]
45
  gaia_sys_msg = SystemMessage(content=prompt_mgmt.render_template("final_answer_processor", {}))
46
  response = response_processing_model.invoke([gaia_sys_msg] + gaia_messages)
47
 
@@ -56,7 +71,7 @@ def optimize_memory(state: State):
56
  if summary:
57
 
58
  # A summary already exists
59
- summary_message = prompt_mgmt.render_template("summarization", {"summary":summary})
60
 
61
  else:
62
  summary_message = "Create a summary of the conversation above:"
 
13
  model = model.bind_tools(llm_tools, parallel_tool_calls=False)
14
 
15
 
16
+ # Node
17
+ def orchestrator(state: State):
18
+ # Get original question if it exists
19
+ question = state.get("question", "")
20
+ if not question:
21
+ question = state["messages"][0].content
22
+
23
+ message = prompt_mgmt.render_template("question_evaluation", {"question": question})
24
+
25
+ # Add prompt to our history
26
+ messages = [HumanMessage(content=message)]
27
+ response = response_processing_model.invoke(messages)
28
+ if response.content == "YES":
29
+ return {"question": question, "attachment": "true", "messages":[response]}
30
+ return {"question": question}
31
+
32
+
33
  # Node
34
  def assistant(state: State):
35
  # set up the question
 
39
  # Get original question if it exists
40
  question = state.get("question", "")
41
  if not question:
42
+ question = state["messages"][0].content
43
 
44
  sys_msg = SystemMessage(content=prompt_mgmt.render_template("base_system", {"summary": summary}))
45
  try:
 
56
  def response_processing(state: State):
57
  question = state.get("question", "")
58
  answer = state["messages"][-1]
59
+ gaia_messages = [HumanMessage(content=question), AIMessage(content=answer.content)]
 
 
60
  gaia_sys_msg = SystemMessage(content=prompt_mgmt.render_template("final_answer_processor", {}))
61
  response = response_processing_model.invoke([gaia_sys_msg] + gaia_messages)
62
 
 
71
  if summary:
72
 
73
  # A summary already exists
74
+ summary_message = prompt_mgmt.render_template("summarization", {"summary": summary})
75
 
76
  else:
77
  summary_message = "Create a summary of the conversation above:"
utils/prompt_manager.py CHANGED
@@ -11,6 +11,7 @@ class PromptType(Enum):
11
  BASE_SYSTEM = "base_system"
12
  ANSWER_REFINEMENT = "answer_refinement"
13
  MEMORY_OPTIMIZATION = "memory_optimization"
 
14
 
15
 
16
  @dataclass
 
11
  BASE_SYSTEM = "base_system"
12
  ANSWER_REFINEMENT = "answer_refinement"
13
  MEMORY_OPTIMIZATION = "memory_optimization"
14
+ QUESTION_REFINEMENT = "question_refinement"
15
 
16
 
17
  @dataclass