"""
Entry point for the command-line interface (CLI) of the todo-agent.
This script demonstrates a typical setup for a stateful, conversational agent:
- Loads environment variables for API keys and configuration.
- Initializes tracing and observability integrations (Phoenix, Weave).
- Manages conversation history by saving and loading it from a JSON file.
- Creates an agent with a file-based storage backend (`JsonTodoStorage`).
- Runs a loop to interact with the user via the command line.
"""
# Standard library imports
import os
import asyncio
import json
# Third-party imports
from dotenv import load_dotenv
from phoenix.otel import register
import weave
from agents import Runner, Agent
# Local application imports
from agent.todo_agent import create_agent
from agent.storage import JsonTodoStorage
# --- Initial Setup ---
# Load environment variables from a .env file (API keys, configuration).
# This is a best practice for managing secrets without hardcoding them
# in the source code.
load_dotenv()
# --- Tracing & Observation Setup ---
# Initialize integrations to observe and debug the agent's behavior.
# This is crucial for understanding the agent's decision-making process.
def initialize_tracing():
    """Configure tracing backends (Phoenix, Weave), degrading gracefully.

    Each backend is optional: a failure is reported to stdout and the
    program continues without that integration.
    """
    # Enable OpenAI tracing and silence Weave's per-call link printing.
    os.environ["OPENAI_TRACING_ENABLED"] = "1"
    os.environ["WEAVE_PRINT_CALL_LINK"] = "false"
    # Phoenix reads custom resource attributes from this OTEL env variable.
    os.environ["OTEL_RESOURCE_ATTRIBUTES"] = "app.name=todo-agent,tutorial.type=production,environment=production,interface=cli"
    # Phoenix: register the OTel tracer; failure is non-fatal.
    try:
        register(project_name="todo-agent-cli", auto_instrument=True)
    except Exception as e:
        print(f"⚠️ Phoenix tracing failed: {e}")
    else:
        print("✅ Phoenix tracing initialized for: todo-agent-cli")
    # Weave: skip if a client is already active for this process.
    if weave.get_client():
        return
    try:
        weave.init("todo-agent-cli")
    except Exception as e:
        print(f"⚠️ Weave tracing failed (continuing without Weave): {e}")
    else:
        print("✅ Weave tracing initialized for: todo-agent-cli")
# Run tracing setup at import time so all subsequent agent calls are traced.
initialize_tracing()
# -----------------------------------------------------------------------------
# Session Management
#
# The agent is stateful across runs: the message history is persisted to a
# JSON file and reloaded on startup so the agent "remembers" past interactions.
# -----------------------------------------------------------------------------
SESSION_FILE = "data/session_default.json"
MAX_TURNS = 12  # Max *user* turns to keep in history to prevent token overflow.
def load_session() -> list:
    """Return the persisted message history, or an empty list for a new session."""
    try:
        with open(SESSION_FILE, "r") as f:
            stored = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing, empty, or corrupt session file -> start a fresh conversation.
        return []
    return stored.get("history", [])
def save_session(history: list):
    """Persist *history* to SESSION_FILE as structured JSON."""
    session_dir = os.path.dirname(SESSION_FILE)
    # Create the data/ directory on first save; no-op on later saves.
    os.makedirs(session_dir, exist_ok=True)
    payload = {"history": history}
    with open(SESSION_FILE, "w") as f:
        json.dump(payload, f, indent=2)
async def main():
    """Run the interactive CLI loop for the to-do agent.

    Loads any previous session from disk, then repeatedly: reads a user
    message, trims the history to the context window, runs one agent turn,
    prints the reply, and saves the updated transcript.
    """
    # Load the previous conversation history to maintain context.
    history = load_session()
    # Create the agent instance using the central factory,
    # providing it with the file-based storage system.
    agent = create_agent(
        storage=JsonTodoStorage(),
        agent_name="To-Do Agent (CLI)"
    )
    print("To-Do Agent (CLI) is ready. Tracing is enabled. Type 'exit' to quit.")
    # Start the main interaction loop.
    while True:
        # Robustness fix: Ctrl-D (EOFError) or Ctrl-C (KeyboardInterrupt) at
        # the prompt previously crashed with a traceback; exit cleanly instead.
        try:
            user_input = input("\nYou: ")
        except (EOFError, KeyboardInterrupt):
            print("\nGoodbye!")
            break
        if user_input.strip().lower() in ("exit", "quit"):
            print("Goodbye!")
            break
        # Add the new user message to the history.
        history.append({"role": "user", "content": user_input})
        # --- Context Window Management ---
        history = _trim_history(history)
        # --- Agent Execution ---
        # The Runner handles the conversation turn, calling tools and the LLM.
        result = await Runner.run(
            agent,
            input=history,
        )
        print("----"*10)
        print(f"Agent: {result.final_output}")
        print("===="*10)
        # The agent's result contains the full, updated history (user,
        # assistant, tools). Replace our local copy for the next turn.
        history = result.to_input_list()
        # Save the updated history to disk to maintain state for the next session.
        save_session(history)

def _trim_history(history: list) -> list:
    """Return *history* trimmed so at most MAX_TURNS user turns remain.

    Prevents token overflow by dropping the oldest messages; the returned
    list always starts at a user message boundary.
    """
    user_message_indices = [i for i, msg in enumerate(history) if msg.get("role") == "user"]
    if len(user_message_indices) > MAX_TURNS:
        # Keep everything from the oldest user message we retain onward.
        start_index = user_message_indices[-MAX_TURNS]
        print(f"(Trimming conversation history to the last {MAX_TURNS} turns...)")
        history = history[start_index:]
    return history
if __name__ == "__main__":
    # Script entry point: drive the asynchronous main loop to completion.
    asyncio.run(main())