File size: 4,691 Bytes
85f3531
 
737d955
85f3531
 
 
 
 
 
 
 
 
 
 
 
966b1e0
 
 
737d955
 
0865717
85f3531
 
737d955
 
85f3531
 
 
 
 
 
 
 
 
 
 
 
 
966b1e0
 
 
 
737d955
 
 
 
 
 
 
 
 
 
 
 
 
85f3531
 
 
737d955
85f3531
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
737d955
 
 
 
 
 
 
 
 
 
 
 
 
85f3531
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import os
import sys
from typing import List, TypedDict, Annotated, Optional
from dotenv import load_dotenv

# Add the project root directory to the Python path so that sibling
# packages (the 'tools' directory) resolve when agent.py is executed
# directly as a script rather than imported as part of a package.
project_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if project_root not in sys.path:
    sys.path.insert(0, project_root)


from tools.calculator import add, subtract, multiply, divide # Importing calculator functions
from tools.wiki_search import wiki_search # Importing wiki search tool
from tools.web_search import web_search # Corrected import alias if needed, or use web_search_tool directly
from tools.analyze_csv import analyze_csv 
from tools.analyze_excel import analyze_excel
from tools.download_file import download_file
from tools.analyze_image import analyze_image
from tools.analyze_audio import analyze_audio
from tools.analyze_youtube import answer_question_about_youtube_video # Importing the YouTube analysis tool
#switch to using gemini 2.0 model 
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage
from langgraph.graph.message import add_messages
from langgraph.graph import START, StateGraph, MessagesState
from langgraph.prebuilt import ToolNode, tools_condition



# Load environment variables (e.g. the Google API key) from a .env file.
load_dotenv()

# Registry of tool callables exposed to the LLM via bind_tools() in
# create_agent(); all are imported from the project-local 'tools' package.
tools = [
    add,
    subtract,
    multiply,
    divide,
    wiki_search,
    web_search,
    analyze_csv,
    analyze_excel,
    download_file,
    analyze_image,
    analyze_audio,
    answer_question_about_youtube_video,]

# Read the system prompt at import time.
# NOTE(review): this path is relative to the current working directory, not
# to this file — unlike the sys.path bootstrap above, running the script
# from another directory will raise FileNotFoundError. Confirm intended.
with open("system_prompt.txt", "r", encoding="utf-8") as f:
    system = f.read()

# Wrapped once here and reused as the first message of every turn in main().
system_message = SystemMessage(content=system)

class AgentState(TypedDict):
    """Graph state schema: an optional input-file path plus the running
    message history (merged via LangGraph's ``add_messages`` reducer).

    NOTE(review): ``create_agent()`` builds its StateGraph on
    ``MessagesState`` rather than this class, so ``AgentState`` is
    currently unused — confirm whether it should replace MessagesState
    (to carry ``input_file``) or be removed.
    """
    input_file: Optional[str] # path of an attached input file, if any
    messages: Annotated[List[AnyMessage], add_messages] # conversation history so far


def create_agent():
    """Build and compile the ReAct-style LangGraph agent.

    Wires a Gemini LLM (with the module-level ``tools`` bound) into a
    two-node graph: an ``assistant`` node that calls the model, and a
    ``tools`` node that executes any tool calls it emits, looping back
    to the assistant until the model answers directly.

    Returns:
        The compiled graph, or ``None`` if LLM initialization or graph
        construction fails (errors are printed rather than raised).
    """
    try:
        llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash")
    except Exception as e:
        print(f"Error initializing LLM: {e}")
        return None

    try:
        model = llm.bind_tools(tools)

        def assistant(state: MessagesState):
            """Assistant node: run the tool-aware LLM on the history."""
            return {"messages": [model.invoke(state["messages"])]}

        graph = StateGraph(MessagesState)
        graph.add_node("assistant", assistant)
        graph.add_node("tools", ToolNode(tools))
        graph.add_edge(START, "assistant")
        # tools_condition routes to "tools" when the last message carries
        # tool calls, otherwise to END.
        graph.add_conditional_edges("assistant", tools_condition)
        graph.add_edge("tools", "assistant")
        return graph.compile()
    except Exception as e:
        print(f"Error creating Agent {e}")
        return None


def main():
    """Run an interactive REPL over the agent.

    Builds the agent once, then loops reading queries from stdin until
    the user types 'quit', sends EOF, or interrupts with Ctrl-C. Each
    turn seeds the graph with the module-level system prompt plus the
    user's query and prints the model's final answer.
    """
    agent = create_agent()
    if agent is None:
        # Guard clause: nothing to chat with.
        print("Agent creation failed.")
        return

    print("\nAgent ready. Enter your query (or type 'quit' to exit):")
    while True:
        try:
            # Strip so stray whitespace neither defeats the 'quit'
            # check nor gets sent to the model as a blank query.
            query = input("> ").strip()
            if query.lower() == 'quit':
                break
            if not query:
                continue
            # Start each turn from the system prompt + the new question.
            initial_messages = [
                system_message,
                HumanMessage(content=query),
            ]
            response = agent.invoke({"messages": initial_messages})
            # The graph's final state holds the full message history;
            # the last entry is the AI's answer.
            print(response["messages"][-1].content)
        except EOFError:
            break
        except KeyboardInterrupt:
            print("\nExiting...")
            break
        except Exception as e:
            # Best-effort loop: report the turn's failure and keep going.
            print(f"An error occurred during chat: {e}")
    print("Exiting agent chat.")

# Script entry point: launch the interactive chat loop.
if __name__ == '__main__':
    try:
        main() # blocking interactive REPL
    except KeyboardInterrupt:
        print("\nExiting program.")