# runner-ai-intelligence / src/agents/chat_agent.py
# HF Space deploy snapshot (minimal allow-list) — commit 557ee65 (avfranco)
import logging
import config
import time
from typing import Dict, Any, List, Optional
from .base import BaseAgent
from llm.base import LLMClient
from observability import logger as obs_logger
from observability import components as obs_components
from pathlib import Path
logger = logging.getLogger(__name__)
class ChatAgent(BaseAgent):
    """
    Agent responsible for handling conversational interactions with the user.

    It can answer questions about the run data, insights, and plan, and can also
    delegate to other agents (like VisualizationAgent) for specific tasks.
    """

    def __init__(self, llm_client: LLMClient):
        """
        Args:
            llm_client: Client used to generate LLM completions.
        """
        self.llm_client = llm_client
        self.context: Dict[str, Any] = {}
        # Default to the English system instruction; run() re-loads it per
        # request so the language can change between calls.
        self.instruction = self._load_instruction("en")

    def _load_instruction(self, language: str = "en") -> str:
        """
        Load the system-instruction prompt for the given language.

        Falls back to the English prompt file when the language-specific file
        is missing, and to a hard-coded default when the English file is also
        missing or any I/O error occurs. Never raises.

        Args:
            language: Language code, e.g. "en" or "pt-BR".

        Returns:
            The prompt text to use as the LLM system instruction.
        """
        fallback = "You are a helpful running coach assistant."
        try:
            # Resolve path relative to this file: ../prompts/chat_<language>.txt
            base_path = Path(__file__).parent.parent / "prompts"
            file_path = base_path / f"chat_{language}.txt"
            if not file_path.exists():
                logger.warning(f"Prompt file not found: {file_path}. Falling back to English.")
                file_path = base_path / "chat_en.txt"
                if not file_path.exists():
                    logger.error("English prompt file missing!")
                    return fallback
            return file_path.read_text(encoding="utf-8")
        except Exception as e:
            logger.error(f"Error loading prompt for language {language}: {e}")
            return fallback

    def _build_prompt(self, message: str, context: Dict[str, Any], language: str) -> str:
        """
        Assemble the user-facing prompt, localized to the requested language.

        Args:
            message: The user's chat message.
            context: Run data (features, insights, plan, summary,
                auto_injected_insights) — missing keys get a localized
                "not available" placeholder.
            language: "pt-BR" selects the Portuguese template; anything else
                gets English.

        Returns:
            The fully rendered prompt string.
        """
        if language == "pt-BR":
            return f"""
Contexto:
Características: {context.get('features', 'Não disponível')}
Insights: {context.get('insights', 'Não disponível')}
Plano: {context.get('plan', 'Não disponível')}
Resumo: {context.get('summary', 'Não disponível')}
### Contexto de Performance Histórica (Injetado automaticamente)
{self._format_auto_insights(context.get('auto_injected_insights', []), language=language)}
Mensagem do Usuário: {message}
Resposta:
"""
        return f"""
Context:
Features: {context.get('features', 'Not available')}
Insights: {context.get('insights', 'Not available')}
Plan: {context.get('plan', 'Not available')}
Summary: {context.get('summary', 'Not available')}
### Historical Performance Context (Auto-injected)
{self._format_auto_insights(context.get('auto_injected_insights', []))}
User Message: {message}
Answer:
"""

    async def run(self, message: str, context: Dict[str, Any], language: str = "en") -> str:
        """
        Process a user message with the given context.

        Args:
            message: The user's chat message.
            context: Run data made available to the prompt.
            language: Language code; "pt-BR" localizes prompt and error reply.

        Returns:
            The LLM's reply as a string, or a localized apology on failure
            (errors are logged, never propagated to the caller).
        """
        # Re-load the language-specific system instruction for every request,
        # since the user's language may change between calls.
        self.instruction = self._load_instruction(language)
        self.context = context
        prompt = self._build_prompt(message, context, language)

        with obs_logger.start_span("chat_agent.run", obs_components.AGENT):
            start_time = time.time()
            try:
                response = await self.llm_client.generate(
                    prompt, instruction=self.instruction, name="chat_agent"
                )
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "info",
                    "Chat response generated",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "message_length": len(message),
                        "response_length": len(str(response)),
                    },
                )
                return str(response)
            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "error",
                    f"Chat agent failed: {e}",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "error_type": type(e).__name__,
                        "error_message": str(e),
                    },
                )
                logger.error(f"Chat agent failed: {e}")
                return (
                    "Desculpe, estou com problemas para processar seu pedido agora."
                    if language == "pt-BR"
                    else "I'm sorry, I'm having trouble processing your request right now."
                )

    def _format_auto_insights(self, insights: List[Dict[str, Any]], language: str = "en") -> str:
        """
        Format historical insight records as a bulleted text block.

        Each record is expected to be a dict with a "date" key and an
        "insights" payload; the payload values may be dicts carrying a
        "message" key, lists of such dicts, or plain strings — anything else
        is stringified as-is. (Schema inferred from the handling below;
        confirm against the insight store.)

        Args:
            insights: Previously stored insight records.
            language: Language code; "pt-BR" localizes placeholder strings.

        Returns:
            One "- [date]: messages" line per record, or a localized
            "no insights" message when the list is empty.
        """
        is_pt = language == "pt-BR"
        if not insights:
            return (
                "Nenhum insight anterior encontrado no histórico."
                if is_pt
                else "No previous insights found in history."
            )
        lines = []
        unknown_date = "Data Desconhecida" if is_pt else "Unknown Date"
        for item in insights:
            date_str = item.get("date", unknown_date)
            # Insights are stored as a dict of message strings
            msgs = item.get("insights", {})
            if isinstance(msgs, dict):
                parts = []
                # Keys are unused — iterate values only.
                for v in msgs.values():
                    if isinstance(v, dict):
                        m = v.get("message")
                        if m:
                            parts.append(m)
                    elif isinstance(v, list):
                        for sub_v in v:
                            if isinstance(sub_v, dict):
                                m = sub_v.get("message")
                                if m:
                                    parts.append(m)
                    elif isinstance(v, str):
                        parts.append(v)
                content = " | ".join(parts)
            else:
                content = str(msgs)
            lines.append(f"- [{date_str}]: {content}")
        return "\n".join(lines)