import logging
import config
import time
from typing import Dict, Any, List, Optional
from .base import BaseAgent
from llm.base import LLMClient
from observability import logger as obs_logger
from observability import components as obs_components
from pathlib import Path
logger = logging.getLogger(__name__)
class ChatAgent(BaseAgent):
    """
    Agent responsible for handling conversational interactions with the user.
    It can answer questions about the run data, insights, and plan, and can also
    delegate to other agents (like VisualizationAgent) for specific tasks.
    """

    def __init__(self, llm_client: LLMClient):
        """Initialize the agent with an LLM client; system prompt defaults to English."""
        self.llm_client = llm_client
        # Most recent context dict passed to run(); kept for potential delegation.
        self.context: Dict[str, Any] = {}
        self.instruction: str = self._load_instruction("en")

    def _load_instruction(self, language: str = "en") -> str:
        """Load the system prompt text for *language* from the prompts directory.

        Falls back to the English prompt file if the language-specific one is
        missing, and to a hard-coded default string if even that is absent or
        any I/O error occurs. Never raises.
        """
        try:
            # Resolve path relative to this file
            base_path = Path(__file__).parent.parent / "prompts"
            filename = f"chat_{language}.txt"
            file_path = base_path / filename
            if not file_path.exists():
                logger.warning(f"Prompt file not found: {file_path}. Falling back to English.")
                file_path = base_path / "chat_en.txt"
                if not file_path.exists():
                    logger.error("English prompt file missing!")
                    return "You are a helpful running coach assistant."
            return file_path.read_text(encoding="utf-8")
        except Exception as e:
            # Best-effort loader: any failure (bad encoding, permissions, ...)
            # degrades to the built-in default instruction.
            logger.error(f"Error loading prompt for language {language}: {e}")
            return "You are a helpful running coach assistant."

    async def run(self, message: str, context: Dict[str, Any], language: str = "en") -> str:
        """
        Process a user message with the given context.

        Builds a language-appropriate prompt from the context (features,
        insights, plan, summary, auto-injected historical insights) and the
        user message, then asks the LLM for a reply. On any failure a
        localized apology string is returned instead of raising.
        """
        # FIX: this docstring was previously placed after the first statements,
        # which made it a discarded string expression rather than a docstring.
        # Load language-specific instruction
        self.instruction = self._load_instruction(language)
        self.context = context
        # Construct a prompt with context
        is_pt = language == "pt-BR"
        if is_pt:
            prompt = f"""
Contexto:
Características: {context.get('features', 'Não disponível')}
Insights: {context.get('insights', 'Não disponível')}
Plano: {context.get('plan', 'Não disponível')}
Resumo: {context.get('summary', 'Não disponível')}
### Contexto de Performance Histórica (Injetado automaticamente)
{self._format_auto_insights(context.get('auto_injected_insights', []), language=language)}
Mensagem do Usuário: {message}
Resposta:
"""
        else:
            prompt = f"""
Context:
Features: {context.get('features', 'Not available')}
Insights: {context.get('insights', 'Not available')}
Plan: {context.get('plan', 'Not available')}
Summary: {context.get('summary', 'Not available')}
### Historical Performance Context (Auto-injected)
{self._format_auto_insights(context.get('auto_injected_insights', []), language=language)}
User Message: {message}
Answer:
"""
        with obs_logger.start_span("chat_agent.run", obs_components.AGENT):
            start_time = time.time()
            try:
                response = await self.llm_client.generate(
                    prompt, instruction=self.instruction, name="chat_agent"
                )
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "info",
                    "Chat response generated",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "message_length": len(message),
                        "response_length": len(str(response)),
                    },
                )
                return str(response)
            except Exception as e:
                duration_ms = (time.time() - start_time) * 1000
                obs_logger.log_event(
                    "error",
                    f"Chat agent failed: {e}",
                    component=obs_components.AGENT,
                    fields={
                        "duration_ms": duration_ms,
                        "language": language,
                        "error_type": type(e).__name__,
                        "error_message": str(e),
                    },
                )
                logger.error(f"Chat agent failed: {e}")
                # User-facing fallback; never propagate the exception to callers.
                return (
                    "Desculpe, estou com problemas para processar seu pedido agora."
                    if language == "pt-BR"
                    else "I'm sorry, I'm having trouble processing your request right now."
                )

    def _format_auto_insights(self, insights: List[Dict[str, Any]], language: str = "en") -> str:
        """Render auto-injected historical insights as one bullet line per entry.

        Each item is expected to be a dict with an optional "date" key and an
        "insights" value that may be a dict of messages (values being dicts
        with a "message" key, lists of such dicts, or plain strings) — TODO
        confirm exact schema against the insight producer.
        Returns a localized placeholder when *insights* is empty.
        """
        is_pt = language == "pt-BR"
        if not insights:
            return (
                "Nenhum insight anterior encontrado no histórico."
                if is_pt
                else "No previous insights found in history."
            )
        lines = []
        unknown_date = "Data Desconhecida" if is_pt else "Unknown Date"
        for item in insights:
            date_str = item.get("date", unknown_date)
            # Insights are stored as a dict of message strings
            msgs = item.get("insights", {})
            if isinstance(msgs, dict):
                parts = []
                for k, v in msgs.items():
                    if isinstance(v, dict):
                        m = v.get("message")
                        if m:
                            parts.append(m)
                    elif isinstance(v, list):
                        for sub_v in v:
                            if isinstance(sub_v, dict):
                                m = sub_v.get("message")
                                if m:
                                    parts.append(m)
                    elif isinstance(v, str):
                        parts.append(v)
                content = " | ".join(parts)
            else:
                # Non-dict payloads are stringified verbatim.
                content = str(msgs)
            lines.append(f"- [{date_str}]: {content}")
        return "\n".join(lines)