# NOTE: the following lines were GitHub UI artifacts accidentally pasted into the
# file (author "Cmuroc27", commit fc7df56, message "funcion read mejorada" —
# Spanish for "improved read function"). Kept here as a comment so the module
# remains valid Python.
import asyncio
import os
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.openai import OpenAI
from tools import (image_analyzer_tool,python_tool, youtube_transcript_tool, calculator_tool, read_document_tool, search_tool)
from llama_index.core.workflow import (Workflow, Context, step, StartEvent, StopEvent, Event)
from llama_index.core.agent import ReActAgent
from dotenv import load_dotenv
import tempfile
load_dotenv()
OPEN_AI = os.getenv("OPENAI_API_KEY")
# Alfred React Agent
class AlfredAdvancedWorkflow(AgentWorkflow):
    """Alfred: a single-router ReAct agent packaged as an AgentWorkflow for GAIA.

    Composes an inner ``AgentWorkflow`` around one ``ReActAgent`` ("alfred_router")
    that dispatches to search / python / document / image / YouTube / calculator
    tools, then post-processes the agent's output into a concise GAIA-style answer.
    """

    def __init__(self, llm: OpenAI):
        """Build the router agent and the inner workflow.

        Args:
            llm: Model used by the ReAct router agent itself. A separate
                low-temperature ``gpt-4o-mini`` instance (``self.llm``) is created
                for the final answer-condensing pass.

        Raises:
            RuntimeError: If the ``OPENAI_API_KEY`` environment variable is unset.
        """
        # NOTE(review): super().__init__() is intentionally not called here — this
        # class never runs *itself* as a workflow; it delegates to the composed
        # ``self.workflow`` below. Confirm whether subclassing AgentWorkflow is
        # still required at all.
        api_key = os.getenv("OPENAI_API_KEY")
        if api_key is None:
            # Fail loudly and early instead of the AttributeError the old
            # ``os.getenv(...).strip()`` produced when the variable was missing.
            raise RuntimeError("OPENAI_API_KEY environment variable is not set")
        api_key = api_key.strip()

        # Dedicated model for condensing the final answer (see ``run``).
        self.llm = OpenAI(model="gpt-4o-mini", temperature=0.1, api_key=api_key)

        self.root_agent = ReActAgent(
            name="alfred_router",
            description="Main agent that routes to the correct tool based on question type.",
            system_prompt="""You are Alfred, an elite autonomous agent for the GAIA benchmark.
STRATEGY FOR SUCCESS:
1. **PYTHON FILES (.py)**: If asked about a python file output:
 - First, use `read_document` to see the code.
 - Second, use `python_interpreter` to execute that exact code and get the answer.
2. **COMPLEX SEARCH**: For questions like "Pitcher before X...", do not give up.
 - Step 1: Find the specific team/roster and year mentioned.
 - Step 2: Find the list/order of players.
 - Step 3: Extract the names.
 - If `web_search` returns generic info, refine the query (e.g., "Taishō Tamai roster 2023 numbers").
3. **FILES**: Always trust the filename provided in the prompt. If `read_document` fails initially, check the extension.
4. **FINAL ANSWER**:
 - Must be concise.
 - No preamble like "The answer is". Just the value.
REMEMBER: Extract the EXACT answer. No explanations, no context, no PREAMBLES.""",
            tools=[search_tool, python_tool, read_document_tool, image_analyzer_tool, youtube_transcript_tool, calculator_tool],
            llm=llm,
            verbose=True,
            max_iterations=15,
        )

        self.workflow = AgentWorkflow(
            agents=[self.root_agent],
            root_agent="alfred_router",
            initial_state={"tool_calls": 0},
            state_prompt="Current state: {state}. User message: {msg}",
        )

    async def run(self, question: str) -> str:
        """Run the inner workflow on *question* and return a concise answer.

        The raw agent response is condensed by ``self.llm`` into a single word or
        sentence, then whitespace- and comma-normalized for GAIA exact matching.
        """
        # ``Context`` is already imported at module level; no local re-import needed.
        ctx = Context(self.workflow)
        response = await self.workflow.run(user_msg=question, ctx=ctx)

        # Condense the (possibly verbose) agent answer into an exact GAIA value.
        # Fixed prompt typo: "single word OF single sentence" -> "or".
        prompt = f""" Given the following question and answer,
provide a concise summary of the answer in a single word or single sentence.\n\n
Question: {question}\n
Answer: {response}\n\n
Concise answer. No explanations """
        answer = str(self.llm.complete(prompt)).strip()

        # Collapse internal runs of whitespace. The original
        # ``.replace(" ", " ")`` replaced a space with a space — a no-op; the
        # intent was clearly to normalize doubled spaces.
        answer = " ".join(answer.split())

        # Normalize comma spacing for list answers: "a,b" -> "a, b".
        if ", " not in answer and "," in answer:
            answer = answer.replace(",", ", ")
        return answer
# Events
class RouteEvent(Event):
    """Workflow event that carries a question together with the agent type
    chosen to handle it."""
    question: str    # the user's original question text
    agent_type: str  # identifier of the agent selected to answer the question
class AgentResponseEvent(Event):
    """Workflow event pairing a question with the answer an agent produced."""
    question: str  # the user's original question text
    answer: str    # the agent's answer to that question
class ReviewEvent(Event):
    """Workflow event requesting a review of a produced answer.

    Presumably consumed by a reviewer step of the main workflow that starts
    after this chunk (see the "Main workflow" marker below) — the consumer is
    not visible here.
    """
    question: str  # the user's original question text
    answer: str    # the candidate answer to be reviewed
class FeedbackEvent(Event):
    """Workflow event carrying reviewer feedback on a previous answer, so the
    question can be retried with that feedback taken into account."""
    question: str        # the user's original question text
    previous_answer: str # the earlier answer that was reviewed
    feedback: str        # reviewer feedback explaining what to improve
# Main workflow