Spaces:
Sleeping
Sleeping
update
Browse files
agent.py
CHANGED
|
@@ -32,9 +32,20 @@ def get_graph(llm):
|
|
| 32 |
|
| 33 |
def call_model(state: State):
|
| 34 |
print("\n-------------------- Agent has been called -----------------------------------\n")
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
print("Agent has made a decision: ",response.content)
|
| 39 |
return {"messages": [response], "aggregate": ["Agent"]}
|
| 40 |
|
|
|
|
def call_model(state: State):
    """Plan-then-answer agent node.

    First asks the LLM to write a plan for the user's question, then feeds
    the plan back and asks for the final answer.

    Args:
        state: Graph state; ``state["messages"]`` holds the conversation so far.

    Returns:
        Partial state update: the LLM's answer message appended to
        ``messages`` and this node's name appended to ``aggregate``.
    """
    print("\n-------------------- Agent has been called -----------------------------------\n")

    # Work on a copy so we never mutate the shared graph state in place —
    # appending directly to state["messages"] would leak the intermediate
    # planning turns into the persisted state.
    messages = list(state["messages"])
    messages.append(HumanMessage(content="Write a plan how to solve this question?"))

    # Pass 1: ask the model for a plan and record it as an AI turn.
    prompt_plan = prompt_template.invoke(messages)
    plan = llm.invoke(prompt_plan).content
    messages.append(AIMessage(content=plan))

    # Pass 2: with the plan in context, ask for the final answer.
    messages.append(HumanMessage(content="Now give me the answer to the question."))
    prompt_answer = prompt_template.invoke(messages)
    response = llm.invoke(prompt_answer)

    print("\nThe Prompt is: ", prompt_answer, "\n")
    print("Agent has made a decision: ", response.content)
    return {"messages": [response], "aggregate": ["Agent"]}