File size: 3,571 Bytes
9dfaeea
9e6b745
9dfaeea
 
fc7df56
81843ad
dc7da23
6c0bb00
d978fd2
6c0bb00
9e6b745
dc7da23
3dc56c2
 
1a4958a
66fe407
3dc56c2
 
66fe407
3dc56c2
66fe407
fc7df56
dc7da23
fc7df56
 
 
 
 
 
 
 
 
 
66fe407
fc7df56
66fe407
fc7df56
 
 
66fe407
fc7df56
66fe407
fc7df56
3dc56c2
66fe407
 
3dc56c2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3d6a2f8
dc7da23
3d6a2f8
fc137e5
 
 
 
 
5702db5
dc7da23
9dfaeea
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dc72cd9
9dfaeea
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import asyncio
import os
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.openai import OpenAI
from tools import (image_analyzer_tool,python_tool, youtube_transcript_tool, calculator_tool, read_document_tool, search_tool)
from llama_index.core.workflow import (Workflow, Context, step, StartEvent, StopEvent, Event)
from llama_index.core.agent import ReActAgent
from dotenv import load_dotenv
import tempfile
load_dotenv()  # load OPENAI_API_KEY (and any other secrets) from a local .env file
# NOTE(review): this module-level constant is shadowed by a local of the same name
# inside AlfredAdvancedWorkflow.__init__ and appears unused here — confirm before removing.
OPEN_AI = os.getenv("OPENAI_API_KEY")
# Alfred React Agent
class AlfredAdvancedWorkflow(AgentWorkflow):
    """Single-agent ReAct workflow for the GAIA benchmark.

    Builds one router agent (``alfred_router``) armed with search, Python,
    document, image, YouTube-transcript and calculator tools, wraps it in an
    inner ``AgentWorkflow``, and post-processes every answer into the terse
    format GAIA grading expects.

    NOTE(review): subclasses ``AgentWorkflow`` but never calls
    ``super().__init__()``; all work is delegated to the composed
    ``self.workflow``. Confirm the inheritance is intentional.
    """

    def __init__(self, llm: OpenAI):
        """Build the router agent and its workflow.

        Args:
            llm: LLM driving the ReAct router agent. A separate gpt-4o-mini
                instance is created for the final answer-condensing pass.

        Raises:
            ValueError: if OPENAI_API_KEY is not set (previously this failed
                with an opaque AttributeError on ``None.strip()``).
        """
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY environment variable is not set")
        # Dedicated low-temperature model for the concise-answer pass below.
        self.llm = OpenAI(model="gpt-4o-mini", temperature=0.1, api_key=api_key.strip())

        self.root_agent = ReActAgent(
            name="alfred_router",
            description="Main agent that routes to the correct tool based on question type.",
            system_prompt="""You are Alfred, an elite autonomous agent for the GAIA benchmark.

STRATEGY FOR SUCCESS:
1. **PYTHON FILES (.py)**: If asked about a python file output:
   - First, use `read_document` to see the code.
   - Second, use `python_interpreter` to execute that exact code and get the answer.
   
2. **COMPLEX SEARCH**: For questions like "Pitcher before X...", do not give up.
   - Step 1: Find the specific team/roster and year mentioned.
   - Step 2: Find the list/order of players.
   - Step 3: Extract the names.
   - If `web_search` returns generic info, refine the query (e.g., "Taishō Tamai roster 2023 numbers").

3. **FILES**: Always trust the filename provided in the prompt. If `read_document` fails initially, check the extension.

4. **FINAL ANSWER**:
   - Must be concise.
   - No preamble like "The answer is". Just the value.

REMEMBER: Extract the EXACT answer. No explanations, no context, no PREAMBLES.""",
            tools=[
                search_tool,
                python_tool,
                read_document_tool,
                image_analyzer_tool,
                youtube_transcript_tool,
                calculator_tool,
            ],
            llm=llm,
            verbose=True,
            max_iterations=15,
        )

        self.workflow = AgentWorkflow(
            agents=[self.root_agent],
            root_agent="alfred_router",
            initial_state={"tool_calls": 0},
            state_prompt="Current state: {state}. User message: {msg}",
        )

    async def run(self, question: str) -> str:
        """Answer *question* via the agent workflow and return a concise string.

        Runs the inner workflow, condenses the raw agent response with a second
        LLM pass, then normalizes whitespace and comma spacing for GAIA grading.
        """
        # Context is imported at module level; the previous function-local
        # re-import was redundant.
        ctx = Context(self.workflow)
        response = await self.workflow.run(user_msg=question, ctx=ctx)

        def _make_concise(raw: str) -> str:
            # Second LLM pass: strip all explanation down to the bare answer.
            # (Typo fixed: "word of single sentence" -> "word or a single sentence".)
            prompt = f""" Given the following question and answer, 
            provide a concise summary of the answer in a single word or a single sentence.\n\n
            Question: {question}\n
            Answer: {raw}\n\n
            Concise answer. No explanations """
            # complete() returns a CompletionResponse; coerce to str so the
            # annotated return type is actually honored.
            return str(self.llm.complete(prompt))

        answer = _make_concise(response).strip().replace("  ", " ")

        # GAIA expects comma-separated lists as "a, b, c"; only rewrite when
        # no ", " is present so already-correct spacing is left untouched.
        if ", " not in answer and "," in answer:
            answer = answer.replace(",", ", ")

        return answer


# Events
class RouteEvent(Event):
    """Workflow event carrying a question plus the agent type selected to handle it."""
    question: str
    agent_type: str

class AgentResponseEvent(Event):
    """Workflow event pairing a question with the answer an agent produced."""
    question: str
    answer: str

class ReviewEvent(Event):
    """Workflow event requesting review of an answer for the given question."""
    question: str
    answer: str

class FeedbackEvent(Event):
    """Workflow event carrying reviewer feedback on a previous answer."""
    question: str
    previous_answer: str
    feedback: str


# Main workflow