Update modules/chatbot/chat_process.py

modules/chatbot/chat_process.py (+134 -98)
@@ -3,16 +3,18 @@ import os
 import json
 import boto3
 import logging
+import time
 import base64
 from typing import Generator
 from botocore.config import Config
+from botocore.exceptions import ClientError
 
 logger = logging.getLogger(__name__)
 
 class ChatProcessor:
     def __init__(self):
         """Initializes the chat processor with AWS Bedrock (Jamba 1.5 Large)"""
-        # Configure the Bedrock client
+        # Configure the Bedrock client with more retries
         self.bedrock = boto3.client(
             'bedrock-runtime',
             region_name=os.environ.get("AWS_REGION", "us-east-1"),
@@ -20,7 +22,7 @@
             aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
             config=Config(
                 retries={
-                    'max_attempts':
+                    'max_attempts': 5,
                     'mode': 'adaptive'
                 }
             )
@@ -28,6 +30,8 @@
         self.conversation_history = []
         self.semantic_context = None
         self.current_lang = 'en'
+        self.last_request_time = 0
+        self.min_request_interval = 2.0  # At least 2 seconds between requests
 
     def set_semantic_context(self, text, metrics, graph_data, lang_code='en'):
         """Sets up the full semantic context for the chat"""
@@ -40,7 +44,7 @@
             'key_concepts': metrics.get('key_concepts', []),
             'concept_centrality': metrics.get('concept_centrality', {}),
             'graph_available': graph_data is not None,
-            'graph_data': graph_data,
+            'graph_data': graph_data,
             'language': lang_code
         }
         self.current_lang = lang_code
@@ -111,10 +115,10 @@ Vos tâches:
         """Builds the multimodal content with text + the graph when available"""
         content_parts = []
 
-        # 1. Add the document text
+        # 1. Add the document text (trimmed to save tokens)
         if self.semantic_context and 'full_text' in self.semantic_context:
             content_parts.append(
-                f"Documento analizado (extracto):\n{self.semantic_context['full_text'][:
+                f"Documento analizado (extracto):\n{self.semantic_context['full_text'][:1000]}..."
             )
 
         # 2. Add key concepts
@@ -122,107 +126,139 @@ Vos tâches:
         concepts = self.semantic_context['key_concepts'][:5]
         content_parts.append(f"Conceptos clave: {concepts}")
 
-        # 3. Add the graph
-        if self.semantic_context and self.semantic_context.get('graph_available'):
-            graph_data = self.semantic_context.get('graph_data')
-            if graph_data:
-                # If the graph is already base64, use it directly
-                if isinstance(graph_data, str) and graph_data.startswith('iVBOR'):
-                    content_parts.append(f"")
-                else:
-                    content_parts.append("Grafo disponible para consultas visuales.")
-
-        # 4. Add the current user message
+        # 3. Add the current user message
         content_parts.append(f"Pregunta del usuario: {message}")
 
         return "\n\n".join(content_parts)
 
     def process_chat_input(self, message: str, lang_code: str) -> Generator[str, None, None]:
         """Processes the message with all available context using Jamba 1.5 on Bedrock"""
-        ...
-        # Add conversation history (last 4 exchanges, to avoid exceeding the context window)
-        for msg in self.conversation_history[-8:]:  # 8 messages = 4 exchanges
-            messages.append(msg)
-
-        # Add the current user message
-        messages.append({
-            "role": "user",
-            "content": user_content
-        })
-
-        # Prepare the request body for Jamba 1.5 Large
-        request_body = {
-            "messages": messages,
-            "max_tokens": 2000,
-            "temperature": 0.7,
-            "top_p": 0.9,
-            "stop": [],
-            "n": 1
-        }
-
-        # Call Bedrock (no streaming for now; Jamba does not support native streaming)
-        response = self.bedrock.invoke_model(
-            modelId='ai21.jamba-1-5-large-v1:0',
-            contentType='application/json',
-            accept='application/json',
-            body=json.dumps(request_body)
-        )
-
-        ...
-            full_response = response_body['choices'][0]['message']['content']
-        else:
-            full_response = "Lo siento, no pude generar una respuesta."
-
-        # Clean the response
-        clean_response = self.clean_generated_text(full_response)
-
-        # Simulate streaming to stay compatible with the interface
-        # Split the response into chunks to simulate streaming
-        chunk_size = 50
-        for i in range(0, len(clean_response), chunk_size):
-            yield clean_response[i:i+chunk_size]
-
-        # Save the response to history
-        self.conversation_history.append({"role": "user", "content": message})
-        self.conversation_history.append({"role": "assistant", "content": clean_response})
-
-        # Keep the history manageable (last 20 messages)
-        if len(self.conversation_history) > 40:
-            self.conversation_history = self.conversation_history[-40:]
-
-        ...
+        max_retries = 3
+        base_delay = 5
+
+        for attempt in range(max_retries):
+            try:
+                if not self.semantic_context:
+                    yield "Error: Contexto semántico no configurado. Recargue el análisis."
+                    return
+
+                # Update the language if it changed
+                if lang_code != self.current_lang:
+                    self.current_lang = lang_code
+                    logger.info(f"Idioma cambiado a: {lang_code}")
+
+                # Simple rate limiting (no more than 1 request every 2 seconds)
+                current_time = time.time()
+                time_since_last = current_time - self.last_request_time
+                if time_since_last < self.min_request_interval:
+                    sleep_time = self.min_request_interval - time_since_last
+                    logger.info(f"Respetando intervalo mínimo: esperando {sleep_time:.2f}s")
+                    time.sleep(sleep_time)
+
+                # Build the multimodal content
+                user_content = self._build_multimodal_content(message)
+
+                # Build the messages for Jamba
+                messages = []
+
+                # Add the system prompt
+                messages.append({
+                    "role": "system",
+                    "content": self._get_system_prompt()
+                })
+
+                # Add conversation history (last 4 exchanges)
+                for msg in self.conversation_history[-8:]:
+                    messages.append(msg)
+
+                # Add the current user message
+                messages.append({
+                    "role": "user",
+                    "content": user_content
+                })
+
+                # Prepare the request body for Jamba 1.5 Large
+                request_body = {
+                    "messages": messages,
+                    "max_tokens": 1500,  # Reduced from 2000 to save tokens
+                    "temperature": 0.7,
+                    "top_p": 0.9,
+                    "stop": [],
+                    "n": 1
+                }
+
+                logger.info(f"Enviando petición a Jamba (intento {attempt + 1}/{max_retries})")
+
+                # Call Bedrock
+                response = self.bedrock.invoke_model(
+                    modelId='ai21.jamba-1-5-large-v1:0',
+                    contentType='application/json',
+                    accept='application/json',
+                    body=json.dumps(request_body)
+                )
+
+                # Record the time of the last request
+                self.last_request_time = time.time()
+
+                # Process the response
+                response_body = json.loads(response['body'].read())
+
+                # Extract the response text
+                if 'choices' in response_body and len(response_body['choices']) > 0:
+                    full_response = response_body['choices'][0]['message']['content']
+                else:
+                    full_response = "Lo siento, no pude generar una respuesta."
+
+                # Clean the response
+                clean_response = self.clean_generated_text(full_response)
+
+                # Simulate streaming
+                chunk_size = 50
+                for i in range(0, len(clean_response), chunk_size):
+                    yield clean_response[i:i+chunk_size]
+
+                # Save the exchange to history
+                self.conversation_history.append({"role": "user", "content": message})
+                self.conversation_history.append({"role": "assistant", "content": clean_response})
+
+                # Keep the history manageable
+                if len(self.conversation_history) > 40:
+                    self.conversation_history = self.conversation_history[-40:]
+
+                logger.info("Respuesta generada y guardada en historial")
+                return  # Success, exit the retry loop
+
+            except ClientError as e:
+                error_code = e.response['Error']['Code']
+                error_message = e.response['Error']['Message']
+
+                if error_code == 'ThrottlingException' and attempt < max_retries - 1:
+                    wait_time = base_delay * (2 ** attempt)  # 5, 10, 20 seconds
+                    logger.warning(f"Throttling detectado. Esperando {wait_time}s (intento {attempt+1}/{max_retries})")
+
+                    # Friendly message for the user
+                    if attempt == 0:
+                        yield "⏳ El sistema está procesando muchas solicitudes. Espera un momento..."
+
+                    time.sleep(wait_time)
+                else:
+                    logger.error(f"Error en process_chat_input: {error_code} - {error_message}", exc_info=True)
+                    error_messages = {
+                        'en': "Error processing message. Please try again in a moment.",
+                        'es': "Error al procesar mensaje. Intente nuevamente en un momento.",
+                        'pt': "Erro ao processar mensagem. Tente novamente em um momento.",
+                        'fr': "Erreur lors du traitement. Réessayez dans un moment."
+                    }
+                    yield error_messages.get(self.current_lang, "Processing error")
+                    return
+
+            except Exception as e:
+                logger.error(f"Error inesperado en process_chat_input: {str(e)}", exc_info=True)
+                error_messages = {
+                    'en': "Unexpected error. Please try again.",
+                    'es': "Error inesperado. Intente nuevamente.",
+                    'pt': "Erro inesperado. Tente novamente.",
+                    'fr': "Erreur inattendue. Réessayez."
+                }
+                yield error_messages.get(self.current_lang, "Processing error")
+                return
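The core of this change is the application-level handling of Bedrock's ThrottlingException, layered on top of boto3's own adaptive retries and the new two-second minimum interval between requests. Below is a minimal standalone sketch of the same backoff pattern, assuming a configured bedrock-runtime client and the Jamba request shape used above; invoke_with_backoff is a hypothetical helper name, not part of this commit.

import json
import time
import boto3
from botocore.exceptions import ClientError

bedrock = boto3.client('bedrock-runtime', region_name='us-east-1')

def invoke_with_backoff(request_body, max_retries=3, base_delay=5):
    """Retry invoke_model on ThrottlingException with exponential backoff."""
    for attempt in range(max_retries):
        try:
            response = bedrock.invoke_model(
                modelId='ai21.jamba-1-5-large-v1:0',
                contentType='application/json',
                accept='application/json',
                body=json.dumps(request_body),
            )
            # Bedrock returns the model output as a JSON byte stream
            return json.loads(response['body'].read())
        except ClientError as e:
            throttled = e.response['Error']['Code'] == 'ThrottlingException'
            if throttled and attempt < max_retries - 1:
                time.sleep(base_delay * 2 ** attempt)  # waits 5s, then 10s, then 20s
            else:
                raise

Note that the client itself is already configured with retries={'max_attempts': 5, 'mode': 'adaptive'}, so throttled calls are retried at two layers; the outer loop mainly exists to surface the "⏳" notice and the per-language error strings to the user.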
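Since Jamba is invoked without native streaming, process_chat_input fakes it by yielding the cleaned response in 50-character slices, so callers can consume it like a real streaming generator. A hypothetical usage sketch, assuming valid AWS credentials in the environment and the helpers referenced by the diff (_get_system_prompt, clean_generated_text) defined elsewhere in this module:

from modules.chatbot.chat_process import ChatProcessor

processor = ChatProcessor()
processor.set_semantic_context(
    text="Texto completo del documento...",            # document under analysis
    metrics={"key_concepts": ["grafo", "semántica"]},  # illustrative metrics dict
    graph_data=None,                                   # no graph for this session
    lang_code='es',
)

# Chunks arrive in 50-character slices; throttling and errors
# surface as yielded user-facing messages rather than exceptions.
for chunk in processor.process_chat_input("¿Cuáles son los conceptos clave?", 'es'):
    print(chunk, end="", flush=True)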