File size: 2,064 Bytes
f45999f
 
 
 
 
dfd09d4
f45999f
 
 
 
 
34e2fa0
f45999f
 
 
dfd09d4
 
 
 
f45999f
 
 
 
 
 
 
 
 
 
 
34e2fa0
 
 
 
 
 
f45999f
 
 
 
 
 
dfd09d4
f45999f
34e2fa0
 
8909b6d
34e2fa0
f45999f
 
 
 
 
8909b6d
f45999f
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
from langgraph.graph.message import add_messages
from langgraph.func import entrypoint
from langchain_core.messages import BaseMessage
from langchain_core.runnables import RunnableConfig

from .tasks import call_model, get_structued_output, manage_memories, call_tool, get_recent_memories


@entrypoint()
def agent(messages: list[BaseMessage], config: RunnableConfig):
    """Run the agent loop over *messages* until the model stops calling tools.

    Fetches recent user memories, calls the model (in parallel with a
    memory-management task), executes any requested tool calls, and loops
    until the model returns a response with no tool calls.

    Returns a dict with:
        answer:   the final answer text
        links:    URL strings from the structured output (empty unless a
                  tool other than ``search_memories`` was called)
        messages: the full history including the final model response
    """
    tool_calls_count = 0
    tool_names_called: set[str] = set()
    links: list[str] = []

    # Fetch recent user information first so the model can condition on it.
    recent_memories = get_recent_memories().result()

    # Kick off the model call and memory management concurrently.
    llm_future = call_model(messages, memories=recent_memories)
    memories_future = manage_memories(messages[-1].content)

    llm_response = llm_future.result()
    # manage_memories runs for its side effects; wait for completion but
    # the return value is not needed (previously bound to an unused var).
    memories_future.result()

    # Tool-execution loop: keep calling the model until it stops
    # requesting tools.
    while llm_response.tool_calls:
        # Launch all requested tool calls in parallel, then gather.
        tool_futures = []
        for tool_call in llm_response.tool_calls:
            tool_names_called.add(tool_call["name"])
            tool_futures.append(call_tool(tool_call))

        tool_results = [fut.result() for fut in tool_futures]
        tool_calls_count += len(tool_results)

        # Record the assistant turn and its tool results in the history.
        messages = add_messages(messages, [llm_response, *tool_results])

        # Ask the model again with the tool results available.
        llm_response = call_model(messages, memories=recent_memories).result()

    # Structure the output only when a tool other than search_memories ran;
    # memory lookups alone don't produce links.
    other_tools_called = any(name != "search_memories" for name in tool_names_called)

    if tool_calls_count > 0 and other_tools_called:
        # NOTE(review): "get_structued_output" is misspelled in .tasks;
        # the imported name is kept as-is so the import still resolves.
        structured_output = (
            get_structued_output(llm_response.content).result().model_dump()
        )
        answer = structured_output["text"]
        links = [str(link) for link in structured_output.get("links", [])]
    else:
        answer = llm_response.content

    return {
        "answer": answer,
        "links": links,
        "messages": messages + [llm_response],
    }