Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -13,9 +13,28 @@ from datetime import datetime
|
|
| 13 |
from tenacity import retry, stop_after_attempt, wait_random_exponential
|
| 14 |
from io import BytesIO
|
| 15 |
import docx
|
|
|
|
|
|
|
| 16 |
st.set_page_config(page_title="El Detective de Alimentos", page_icon="🍎", layout="wide")
|
| 17 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 18 |
logger = logging.getLogger("food_detective_app")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
try:
|
| 20 |
if 'GEMINI_API_KEY' in st.secrets:
|
| 21 |
GEMINI_API_KEY = st.secrets['GEMINI_API_KEY']
|
|
@@ -28,6 +47,7 @@ try:
|
|
| 28 |
except Exception as e:
|
| 29 |
st.error(f"❌ Error al configurar Gemini API: {e}")
|
| 30 |
st.stop()
|
|
|
|
| 31 |
@st.cache_resource
|
| 32 |
def get_gemini_model():
|
| 33 |
try:
|
|
@@ -36,6 +56,7 @@ def get_gemini_model():
|
|
| 36 |
st.error(f"❌ No se pudo cargar el modelo Gemini: {e}")
|
| 37 |
return None
|
| 38 |
model = get_gemini_model()
|
|
|
|
| 39 |
@st.cache_data
|
| 40 |
def load_data():
|
| 41 |
try:
|
|
@@ -52,6 +73,8 @@ def load_data():
|
|
| 52 |
st.error(f"Error cargando los archivos de datos: {e}")
|
| 53 |
return None, None
|
| 54 |
alimentos_data, foodb_index = load_data()
|
|
|
|
|
|
|
| 55 |
COMPOUND_SYNONYM_MAP = {
|
| 56 |
"gluten": ["gluten", "gliadin"],
|
| 57 |
"caseína": ["casein", "casomorphin"],
|
|
@@ -1055,23 +1078,24 @@ INTEGRATED_NEURO_FOOD_MAP = {
|
|
| 1055 |
"fuentes_comunes": ["inflamación sistémica modulada por dieta rica en grasas/sacharosa; el pathway es endógeno pero sensible a la dieta y microbiota"]
|
| 1056 |
}
|
| 1057 |
}
|
|
|
|
|
|
|
| 1058 |
def generate_word_report(report_text):
|
| 1059 |
-
"""
|
| 1060 |
-
Carga una plantilla de Word, reemplaza un marcador de posición y la devuelve como un objeto binario en memoria.
|
| 1061 |
-
"""
|
| 1062 |
try:
|
| 1063 |
template_path = os.path.join("PLANTILLAS", "PLANTILLA_INTERPRETACION.docx")
|
| 1064 |
-
|
| 1065 |
-
|
| 1066 |
-
|
| 1067 |
-
|
| 1068 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1069 |
doc_io = BytesIO()
|
| 1070 |
doc.save(doc_io)
|
| 1071 |
doc_io.seek(0)
|
| 1072 |
-
|
| 1073 |
return doc_io
|
| 1074 |
-
|
| 1075 |
except Exception as e:
|
| 1076 |
logger.error(f"Error al generar el informe de Word: {e}")
|
| 1077 |
return None
|
|
@@ -1080,345 +1104,137 @@ def sanitize_text(text):
|
|
| 1080 |
if not text: return ""
|
| 1081 |
return re.sub(r'[.,;()]', '', text).lower().strip()
|
| 1082 |
|
| 1083 |
-
|
| 1084 |
|
|
|
|
| 1085 |
def extract_entities_with_gemini(query):
|
| 1086 |
if not model: return None
|
| 1087 |
-
|
| 1088 |
system_prompt = f"""
|
| 1089 |
-
|
| 1090 |
-
|
| 1091 |
-
|
| 1092 |
-
|
| 1093 |
-
|
| 1094 |
-
|
| 1095 |
-
|
| 1096 |
-
Consulta de Ejemplo: "Cuando como mucha carne me duele, hincha y se pone rojo el primer dedo del pie."
|
| 1097 |
-
Respuesta JSON de Ejemplo:
|
| 1098 |
-
{{
|
| 1099 |
-
"alimentos": ["carne"],
|
| 1100 |
-
"sintomas": ["dolor", "hinchazón", "rojo", "dedo del pie"]
|
| 1101 |
-
}}
|
| 1102 |
-
|
| 1103 |
-
Ahora, procesa la siguiente consulta real:
|
| 1104 |
-
Consulta: "{query}"
|
| 1105 |
"""
|
| 1106 |
-
|
| 1107 |
try:
|
| 1108 |
response = model.generate_content(system_prompt)
|
| 1109 |
-
|
| 1110 |
-
if
|
| 1111 |
-
|
| 1112 |
-
|
| 1113 |
-
|
| 1114 |
-
json_text = re.search(r'\{.*\}', response.text, re.DOTALL).group(0)
|
| 1115 |
-
|
| 1116 |
-
extracted_data = json.loads(json_text)
|
| 1117 |
-
# Guardar los síntomas originales para la traducción posterior
|
| 1118 |
-
extracted_data['sintomas_originales_ia'] = extracted_data.get('sintomas', [])
|
| 1119 |
-
logger.info("Extracción con Gemini exitosa.")
|
| 1120 |
return extracted_data
|
| 1121 |
-
except
|
| 1122 |
-
logger.error(f"Error en
|
| 1123 |
raise e
|
| 1124 |
|
| 1125 |
def reinforce_entities_with_keywords(entities, query, food_map, master_symptom_map):
|
| 1126 |
-
if not entities:
|
| 1127 |
-
entities = {"alimentos": [], "sintomas": []}
|
| 1128 |
-
|
| 1129 |
query_sanitized = sanitize_text(query)
|
| 1130 |
-
current_foods = entities.get("alimentos", [])
|
| 1131 |
-
current_foods_sanitized = {sanitize_text(f) for f in current_foods}
|
| 1132 |
for food_keyword in food_map.keys():
|
| 1133 |
-
if food_keyword in query_sanitized
|
| 1134 |
-
|
| 1135 |
-
|
| 1136 |
-
current_symptoms = entities.get("sintomas", [])
|
| 1137 |
-
query_to_search_symptoms = " " + query_sanitized + " "
|
| 1138 |
for main_symptom, details in master_symptom_map.items():
|
| 1139 |
-
|
| 1140 |
-
|
| 1141 |
-
|
| 1142 |
-
|
| 1143 |
-
|
| 1144 |
-
if main_symptom not in current_symptoms:
|
| 1145 |
-
logger.info(f"Sistema de respaldo: Normalizando '{phrase}' a '{main_symptom}'.")
|
| 1146 |
-
current_symptoms.append(main_symptom)
|
| 1147 |
-
query_to_search_symptoms = query_to_search_symptoms.replace(" " + phrase + " ", " ")
|
| 1148 |
-
|
| 1149 |
-
entities["sintomas"] = list(set(current_symptoms))
|
| 1150 |
return entities
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1151 |
def find_best_matches_hybrid(entities, data):
|
| 1152 |
-
"""
|
| 1153 |
-
Motor de búsqueda semántico y holístico (Versión Final).
|
| 1154 |
-
Crea una "nube de palabras clave" para el usuario y para cada entrada de la BD,
|
| 1155 |
-
y puntúa basándose en el tamaño de su intersección.
|
| 1156 |
-
"""
|
| 1157 |
if not entities or not data: return []
|
| 1158 |
-
|
| 1159 |
-
# --- 1. CREAR LA "NUBE DE PALABRAS CLAVE DEL USUARIO" ---
|
| 1160 |
user_symptoms_list = entities.get("sintomas", [])
|
| 1161 |
user_foods_list = entities.get("alimentos", [])
|
| 1162 |
-
|
| 1163 |
-
# Combinar síntomas y alimentos en un solo texto
|
| 1164 |
user_text = " ".join(user_symptoms_list) + " " + " ".join(user_foods_list)
|
| 1165 |
-
|
| 1166 |
-
|
| 1167 |
-
# Expandir con el conocimiento bioquímico de FOOD_TO_COMPOUND_MAP
|
| 1168 |
-
user_keywords_expanded = set(user_keywords_base)
|
| 1169 |
-
for food in user_foods_list:
|
| 1170 |
-
if food in FOOD_TO_COMPOUND_MAP:
|
| 1171 |
-
user_keywords_expanded.update(FOOD_TO_COMPOUND_MAP[food])
|
| 1172 |
-
|
| 1173 |
-
user_keywords = user_keywords_expanded
|
| 1174 |
-
|
| 1175 |
-
RARE_CONDITIONS = [
|
| 1176 |
-
"Porfiria Aguda Intermitente (PAI).", "Enfermedad de Refsum del Adulto.",
|
| 1177 |
-
"Ataxia por Gluten.", "Encefalopatía por Gluten.", "Enfermedad de Wilson.",
|
| 1178 |
-
"Deficiencia de Ornitina Transcarbamilasa."
|
| 1179 |
-
]
|
| 1180 |
|
| 1181 |
results = []
|
| 1182 |
for entry in data:
|
| 1183 |
-
|
| 1184 |
-
db_food_text = entry.get("compuesto_alimento", "")
|
| 1185 |
-
db_symptoms_text = " ".join(entry.get("sintomas_clave", []))
|
| 1186 |
-
|
| 1187 |
-
db_text = db_food_text + " " + db_symptoms_text
|
| 1188 |
db_keywords = set(re.findall(r'\b\w+\b', sanitize_text(db_text)))
|
| 1189 |
intersection = user_keywords.intersection(db_keywords)
|
| 1190 |
-
|
| 1191 |
-
# Solo procesar si hay al menos una palabra en común
|
| 1192 |
if intersection:
|
| 1193 |
-
|
| 1194 |
-
|
| 1195 |
-
|
| 1196 |
-
|
| 1197 |
-
# Ponderación por rareza
|
| 1198 |
-
condition_name = entry.get("condicion_asociada", "")
|
| 1199 |
-
if condition_name in RARE_CONDITIONS:
|
| 1200 |
-
final_score = base_score * 0.4
|
| 1201 |
-
else:
|
| 1202 |
-
final_score = base_score * 1.0
|
| 1203 |
-
|
| 1204 |
-
score_details['total'] = int(final_score)
|
| 1205 |
-
|
| 1206 |
-
# Añadir a resultados si supera un umbral mínimo para evitar ruido
|
| 1207 |
-
if score_details['total'] > 10: # Umbral bajo para permitir coincidencias débiles pero relevantes
|
| 1208 |
-
results.append({
|
| 1209 |
-
'entry': entry,
|
| 1210 |
-
'score': score_details,
|
| 1211 |
-
'matched_keywords': list(intersection) # Guardamos las palabras coincidentes
|
| 1212 |
-
})
|
| 1213 |
-
|
| 1214 |
-
if not results: return []
|
| 1215 |
return sorted(results, key=lambda x: x['score']['total'], reverse=True)
|
| 1216 |
-
@retry(wait=wait_random_exponential(min=1, max=10), stop=stop_after_attempt(3))
|
| 1217 |
-
@retry(wait=wait_random_exponential(min=1, max=10), stop=stop_after_attempt(3))
|
| 1218 |
-
def translate_symptoms_with_gemini(symptoms_list, master_symptom_map):
|
| 1219 |
-
"""
|
| 1220 |
-
Usa la IA para traducir síntomas coloquiales a términos clínicos estandarizados
|
| 1221 |
-
de nuestro MASTER_SYMPTOM_MAP.
|
| 1222 |
-
"""
|
| 1223 |
-
if not symptoms_list or not model:
|
| 1224 |
-
return []
|
| 1225 |
-
|
| 1226 |
-
# Crear una lista de los términos clínicos que la IA puede elegir
|
| 1227 |
-
clinical_terms = list(master_symptom_map.keys())
|
| 1228 |
-
|
| 1229 |
-
# Crear una descripción para cada término clínico para darle más contexto a la IA
|
| 1230 |
-
contextual_terms = []
|
| 1231 |
-
for term in clinical_terms:
|
| 1232 |
-
description = ", ".join(master_symptom_map[term].get("frases_es", []))
|
| 1233 |
-
contextual_terms.append(f"- {term}: (descrito como: {description})")
|
| 1234 |
-
|
| 1235 |
-
contextual_terms_str = "\n".join(contextual_terms)
|
| 1236 |
-
symptoms_str = ", ".join(symptoms_list)
|
| 1237 |
-
|
| 1238 |
-
system_prompt = f"""
|
| 1239 |
-
Eres un experto en terminología médica. Tu única tarea es mapear una lista de síntomas descritos por un usuario a una lista de términos clínicos estandarizados.
|
| 1240 |
-
|
| 1241 |
-
LISTA DE TÉRMINOS CLÍNICOS POSIBLES:
|
| 1242 |
-
{contextual_terms_str}
|
| 1243 |
-
|
| 1244 |
-
SÍNTOMAS DEL USUARIO A ANALIZAR:
|
| 1245 |
-
"{symptoms_str}"
|
| 1246 |
-
|
| 1247 |
-
INSTRUCCIONES:
|
| 1248 |
-
1. Lee cada síntoma del usuario.
|
| 1249 |
-
2. Encuentra el término clínico más apropiado de la lista proporcionada.
|
| 1250 |
-
3. Si un síntoma del usuario ya es un término clínico, simplemente inclúyelo.
|
| 1251 |
-
4. Si no encuentras una coincidencia clara para un síntoma, ignóralo.
|
| 1252 |
-
5. Devuelve ÚNICAMENTE una lista JSON con los términos clínicos estandarizados.
|
| 1253 |
-
|
| 1254 |
-
Ejemplo:
|
| 1255 |
-
Si los síntomas del usuario son ["crecimiento de un bulto en el cuello", "cansancio"], la respuesta debe ser:
|
| 1256 |
-
["bocio", "fatiga"]
|
| 1257 |
-
"""
|
| 1258 |
-
|
| 1259 |
-
try:
|
| 1260 |
-
response = model.generate_content(system_prompt)
|
| 1261 |
-
# Extraer la lista JSON de la respuesta
|
| 1262 |
-
match = re.search(r'\[.*?\]', response.text.replace("'", '"'))
|
| 1263 |
-
if match:
|
| 1264 |
-
translated_list = json.loads(match.group(0))
|
| 1265 |
-
logger.info(f"Síntomas traducidos por IA: {symptoms_list} -> {translated_list}")
|
| 1266 |
-
return translated_list
|
| 1267 |
-
except (Exception, google.api_core.exceptions.GoogleAPICallError) as e:
|
| 1268 |
-
logger.error(f"Error en la traducción de síntomas con Gemini: {e}")
|
| 1269 |
-
raise e # Para que tenacity reintente
|
| 1270 |
-
|
| 1271 |
-
return []
|
| 1272 |
-
|
| 1273 |
|
| 1274 |
def find_best_foodb_matches(user_foods_es, foodb_index_keys, food_name_map, limit=3):
|
| 1275 |
-
|
| 1276 |
-
Encuentra una lista de las mejores coincidencias de alimentos en FoodB,
|
| 1277 |
-
utilizando búsqueda de palabras completas para máxima precisión.
|
| 1278 |
-
"""
|
| 1279 |
-
if not user_foods_es:
|
| 1280 |
-
return []
|
| 1281 |
-
|
| 1282 |
search_terms_en = set()
|
| 1283 |
for food_es in user_foods_es:
|
| 1284 |
for key_es, value_en_list in food_name_map.items():
|
| 1285 |
-
if key_es in food_es.lower():
|
| 1286 |
-
|
| 1287 |
-
|
| 1288 |
-
if not search_terms_en:
|
| 1289 |
-
return []
|
| 1290 |
-
|
| 1291 |
-
sorted_terms = sorted(list(search_terms_en), key=len, reverse=True)
|
| 1292 |
found_matches = []
|
| 1293 |
-
|
| 1294 |
-
|
| 1295 |
-
|
| 1296 |
-
|
| 1297 |
-
# Prioridad 2: Coincidencia de palabra completa (usando regex)
|
| 1298 |
-
# Esto encontrará 'beef' en 'Beef, steak' pero no 'lamb' en 'Lambsquarters'
|
| 1299 |
-
regex = r'\b' + re.escape(term) + r'\b'
|
| 1300 |
-
all_possible_matches.extend([key for key in foodb_index_keys if re.search(regex, key, re.IGNORECASE)])
|
| 1301 |
|
| 1302 |
-
|
| 1303 |
-
for match in all_possible_matches:
|
| 1304 |
-
if match not in found_matches:
|
| 1305 |
-
found_matches.append(match)
|
| 1306 |
|
| 1307 |
-
|
| 1308 |
-
|
| 1309 |
def generate_detailed_analysis(query, match):
|
| 1310 |
-
if not model: return "Error:
|
| 1311 |
-
if not match or not isinstance(match, dict):
|
| 1312 |
-
logger.error("Datos de coincidencia inválidos para análisis detallado.")
|
| 1313 |
-
return "Error interno al generar análisis."
|
| 1314 |
-
|
| 1315 |
prompt_parts = [
|
| 1316 |
-
"
|
| 1317 |
-
f
|
| 1318 |
-
f
|
| 1319 |
-
f
|
| 1320 |
-
f
|
| 1321 |
-
|
| 1322 |
-
|
| 1323 |
-
"
|
| 1324 |
-
|
| 1325 |
-
|
| 1326 |
-
|
| 1327 |
-
|
| 1328 |
-
|
| 1329 |
-
'Explica el mecanismo fisiológico mencionado.',
|
| 1330 |
-
|
| 1331 |
-
"\n### Plan de Acción Práctico y Recomendaciones",
|
| 1332 |
-
"Aquí tienes una guía práctica. Recuerda que es fundamental conversar sobre estos puntos con un profesional de la salud cualificado:",
|
| 1333 |
-
|
| 1334 |
-
f'''* **Alimentos a Limitar o Evitar:** El grupo principal a observar son aquellos ricos en **[identifica y nombra el compuesto/grupo principal del campo "Alimentos implicados"]**. Concretamente, considera reducir o eliminar:
|
| 1335 |
-
* **[Ejemplo 1 de alimento claro y común extraído del campo "Alimentos implicados"]**
|
| 1336 |
-
* **[Ejemplo 2 de alimento claro y común extraído del campo "Alimentos implicados"]**
|
| 1337 |
-
* **[Ejemplo 3 de alimento claro y común extraído del campo "Alimentos implicados"]**''',
|
| 1338 |
-
|
| 1339 |
-
f'''* **Alternativas y Reemplazos Seguros:** Para que no sientas que te limitas, si debes reducir **[el grupo de alimentos problemático]**, puedes probar con:
|
| 1340 |
-
* **[Sugerencia 1 de reemplazo lógico usando tu conocimiento general.]**
|
| 1341 |
-
* **[Sugerencia 2 de reemplazo lógico y común.]**''',
|
| 1342 |
-
|
| 1343 |
-
f'''* **Consejos Clave y Pistas Ocultas:** A menudo, el compuesto problemático se esconde donde menos lo esperas. **[Ofrece un consejo práctico y específico para la condición.]**''',
|
| 1344 |
-
|
| 1345 |
-
f'''* **Conversa con un Profesional:** Este es el paso más importante. Comparte estos resultados con tu médico o un nutricionista. Ellos pueden guiarte en una **dieta de eliminación y reintroducción controlada** para confirmar la sensibilidad y asegurar que tu nutrición siga siendo completa y equilibrada.''',
|
| 1346 |
-
|
| 1347 |
-
"\n### **IMPORTANTE: Descargo de Responsabilidad y Banderas Rojas**",
|
| 1348 |
-
"Este análisis es una herramienta informativa de IA y **NO es un diagnóstico médico.** La información proporcionada no debe sustituir la consulta con un profesional cualificado.",
|
| 1349 |
-
|
| 1350 |
-
"**🚩 BANDERAS ROJAS: ¡Atención!** Ciertas condiciones graves pueden imitar los síntomas de una intolerancia alimentaria. Es crucial que consultes a un médico para descartar problemas serios, especialmente si experimentas alguno de los siguientes escenarios:",
|
| 1351 |
-
|
| 1352 |
-
'''1. **Cáncer Gástrico o de Colon:**
|
| 1353 |
-
* **Síntomas que imitan:** Sensación de plenitud rápida, indigestión (especialmente con carnes), hinchazón y dolor abdominal.
|
| 1354 |
-
* **Banderas Rojas Clave:** **Pérdida de peso inexplicable y no intencionada**, fatiga severa, sangre en las heces (visible o no), o un cambio persistente en los hábitos intestinales.''',
|
| 1355 |
-
|
| 1356 |
-
'''2. **Enfermedad Inflamatoria Intestinal (Crohn o Colitis Ulcerosa):**
|
| 1357 |
-
* **Síntomas que imitan:** Dolor abdominal, diarrea crónica e intolerancia a múltiples alimentos.
|
| 1358 |
-
* **Banderas Rojas Clave:** **Diarrea con sangre o moco**, fiebre recurrente, aftas bucales dolorosas, dolor articular y pérdida de peso.''',
|
| 1359 |
-
|
| 1360 |
-
'''3. **Embarazo:**
|
| 1361 |
-
* **Síntomas que imitan:** Náuseas y vómitos (especialmente matutinos), aversión a ciertos olores o alimentos que antes se toleraban.
|
| 1362 |
-
* **Banderas Rojas Clave:** **Ausencia de menstruación (amenorrea)**, sensibilidad en los senos u otros sintomas de embarazo.''',
|
| 1363 |
-
|
| 1364 |
-
'''4. **Isquemia Mesentérica Crónica ("Angina Intestinal"):**
|
| 1365 |
-
* **Síntomas que imitan:** Dolor abdominal tipo cólico que aparece de forma predecible **15-30 minutos después de comer**, llevando al paciente a desarrollar "miedo a comer".
|
| 1366 |
-
* **Banderas Rojas Clave:** **Pérdida de peso significativa** (porque el paciente evita comer para no tener dolor) y presencia de factores de riesgo cardiovascular (fumador, diabetes, hipertensión).''',
|
| 1367 |
-
|
| 1368 |
-
'''5. **Trastornos de la Vesícula Biliar (Cálculos o Disquinesia):**
|
| 1369 |
-
* **Síntomas que imitan:** Dolor intenso en la parte superior derecha del abdomen, hinchazón y gases, especialmente después de comer alimentos grasos.
|
| 1370 |
-
* **Banderas Rojas Clave:** El dolor es agudo, puede irradiarse a la espalda o al hombro derecho, y puede estar acompañado de **náuseas, vómitos, fiebre o ictericia** (coloración amarillenta de la piel y los ojos).''',
|
| 1371 |
-
|
| 1372 |
-
"**Si tus síntomas son severos, persistentes o se acompañan de alguna de estas banderas rojas, la consulta médica es urgente y prioritaria.**"
|
| 1373 |
]
|
| 1374 |
prompt = "\n".join(prompt_parts)
|
| 1375 |
try:
|
| 1376 |
-
logger.info(f"Generando análisis detallado para {match.get('condicion_asociada')}")
|
| 1377 |
response = model.generate_content(prompt)
|
| 1378 |
-
|
| 1379 |
-
|
| 1380 |
-
|
| 1381 |
-
else:
|
| 1382 |
-
logger.error("La respuesta de Gemini para el análisis detallado fue vacía.")
|
| 1383 |
-
raise ValueError("Respuesta vacía de la API")
|
| 1384 |
-
except (Exception, google.api_core.exceptions.GoogleAPICallError) as e:
|
| 1385 |
-
logger.error(f"Error generando análisis detallado (puede ser reintentado): {e}")
|
| 1386 |
raise e
|
| 1387 |
|
| 1388 |
-
def create_relevance_chart(results):
|
| 1389 |
-
# Modificado para mostrar hasta 10 resultados en el gráfico
|
| 1390 |
-
top_results = results[:10]
|
| 1391 |
-
|
| 1392 |
-
condition_names = [re.sub(r'\(.*\)', '', res['entry']['condicion_asociada']).strip() for res in top_results]
|
| 1393 |
-
chart_data = {"Condición": condition_names, "Relevancia": [res['score']['total'] for res in top_results]}
|
| 1394 |
-
source = pd.DataFrame(chart_data)
|
| 1395 |
-
|
| 1396 |
-
chart = alt.Chart(source).mark_bar(color='#1f77b4').encode(
|
| 1397 |
-
x=alt.X('Relevancia:Q', title='Puntuación de Relevancia'),
|
| 1398 |
-
y=alt.Y('Condición:N', sort='-x', title='', axis=alt.Axis(labelLimit=300)),
|
| 1399 |
-
tooltip=[alt.Tooltip('Condición:N', title='Condición'), alt.Tooltip('Relevancia:Q', title='Puntuación')]
|
| 1400 |
-
).properties(
|
| 1401 |
-
title='Principales Coincidencias según tu Caso'
|
| 1402 |
-
).configure_axis(
|
| 1403 |
-
labelFontSize=12,
|
| 1404 |
-
titleFontSize=14
|
| 1405 |
-
).configure_title(
|
| 1406 |
-
fontSize=16,
|
| 1407 |
-
anchor='start'
|
| 1408 |
-
)
|
| 1409 |
-
|
| 1410 |
-
return chart
|
| 1411 |
def generate_neuro_report_text(entities, food_map, neuro_map):
|
| 1412 |
-
"""
|
| 1413 |
-
|
| 1414 |
-
"""
|
| 1415 |
-
report_lines = ["\n\n" + "="*50, "🧠 POSIBLES EFECTOS NEUROPSICOLÓGICOS DE LOS COMPONENTES (no incluye contaminantes)", "="*50 + "\n"]
|
| 1416 |
user_foods = entities.get("alimentos", [])
|
| 1417 |
relevant_compounds = set()
|
| 1418 |
if user_foods:
|
| 1419 |
for food in user_foods:
|
| 1420 |
-
if food in food_map:
|
| 1421 |
-
relevant_compounds.update(food_map[food])
|
| 1422 |
|
| 1423 |
found_neuro_effect = False
|
| 1424 |
if relevant_compounds:
|
|
@@ -1426,24 +1242,20 @@ def generate_neuro_report_text(entities, food_map, neuro_map):
|
|
| 1426 |
if compound in neuro_map:
|
| 1427 |
found_neuro_effect = True
|
| 1428 |
effect_info = neuro_map[compound]
|
| 1429 |
-
report_lines.append(f"
|
| 1430 |
-
report_lines.append(f"
|
| 1431 |
|
| 1432 |
if not found_neuro_effect:
|
| 1433 |
-
report_lines.append("No se
|
| 1434 |
-
|
| 1435 |
return "\n".join(report_lines)
|
| 1436 |
|
| 1437 |
def generate_molecular_report_text(best_match, entities, foodb_index, food_name_map, synonym_map, triggers_map):
|
| 1438 |
-
"""
|
| 1439 |
-
|
| 1440 |
-
"""
|
| 1441 |
-
report_lines = ["\n\n" + "="*50, "🔬 COMPONENTES MOLECULARES DEL DIAGNÓSTICO", "="*50 + "\n"]
|
| 1442 |
user_foods_mentioned = entities.get("alimentos", [])
|
| 1443 |
|
| 1444 |
if not user_foods_mentioned:
|
| 1445 |
-
|
| 1446 |
-
return "\n".join(report_lines)
|
| 1447 |
|
| 1448 |
initial_clues = set()
|
| 1449 |
direct_text = best_match.get("compuesto_alimento", "").lower()
|
|
@@ -1458,18 +1270,12 @@ def generate_molecular_report_text(best_match, entities, foodb_index, food_name_
|
|
| 1458 |
final_search_keywords = set()
|
| 1459 |
for clue in initial_clues:
|
| 1460 |
final_search_keywords.add(clue)
|
| 1461 |
-
if clue in synonym_map:
|
| 1462 |
-
final_search_keywords.update(synonym_map[clue])
|
| 1463 |
|
| 1464 |
-
if not final_search_keywords:
|
| 1465 |
-
report_lines.append(f"No se pudieron determinar los compuestos moleculares clave para '{best_match.get('condicion_asociada')}'.")
|
| 1466 |
-
return "\n".join(report_lines)
|
| 1467 |
-
|
| 1468 |
best_food_matches = find_best_foodb_matches(user_foods_mentioned, foodb_index.keys(), food_name_map)
|
| 1469 |
|
| 1470 |
if not best_food_matches:
|
| 1471 |
-
|
| 1472 |
-
return "\n".join(report_lines)
|
| 1473 |
|
| 1474 |
found_any_data = False
|
| 1475 |
for food_key in best_food_matches:
|
|
@@ -1481,239 +1287,137 @@ def generate_molecular_report_text(best_match, entities, foodb_index, food_name_
|
|
| 1481 |
|
| 1482 |
if relevant_compounds:
|
| 1483 |
found_any_data = True
|
| 1484 |
-
report_lines.append(f"\n
|
| 1485 |
-
|
| 1486 |
for item in relevant_compounds:
|
| 1487 |
-
if item['compound'] not in
|
| 1488 |
-
report_lines.append(f"Compuesto: {item['compound']}")
|
| 1489 |
-
|
| 1490 |
-
unique_compounds_shown.add(item['compound'])
|
| 1491 |
|
| 1492 |
if not found_any_data:
|
| 1493 |
-
|
| 1494 |
|
| 1495 |
return "\n".join(report_lines)
|
| 1496 |
|
| 1497 |
-
def
|
| 1498 |
-
|
| 1499 |
-
|
| 1500 |
-
|
| 1501 |
-
|
| 1502 |
-
|
| 1503 |
-
|
| 1504 |
-
|
| 1505 |
-
|
| 1506 |
-
|
| 1507 |
-
|
|
|
|
| 1508 |
|
| 1509 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1510 |
with col_img1:
|
| 1511 |
if os.path.exists("imagen.png"): st.image("imagen.png", width=150)
|
| 1512 |
with col_text:
|
| 1513 |
st.title("El Detective de Alimentos")
|
| 1514 |
-
st.markdown("#####
|
| 1515 |
with col_img2:
|
| 1516 |
if os.path.exists("buho.png"): st.image("buho.png", width=120)
|
| 1517 |
-
st.markdown("---")
|
| 1518 |
|
| 1519 |
if 'search_results' not in st.session_state: st.session_state.search_results = None
|
| 1520 |
if 'user_query' not in st.session_state: st.session_state.user_query = ""
|
| 1521 |
if 'entities' not in st.session_state: st.session_state.entities = None
|
| 1522 |
if 'analysis_cache' not in st.session_state: st.session_state.analysis_cache = {}
|
| 1523 |
-
if 'query' not in st.session_state: st.session_state.query = ""
|
| 1524 |
-
if 'start_analysis' not in st.session_state: st.session_state.start_analysis = False
|
| 1525 |
|
| 1526 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1527 |
st.session_state.search_results = None
|
| 1528 |
-
st.session_state.user_query = ""
|
| 1529 |
-
st.session_state.entities = None
|
| 1530 |
st.session_state.analysis_cache = {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1531 |
|
| 1532 |
-
|
| 1533 |
-
st.session_state.
|
| 1534 |
-
|
| 1535 |
-
|
| 1536 |
-
st.write("**¿No sabes por dónde empezar? Haz clic en un ejemplo para analizar un caso estructurado:** (Los procesos toman 3 o 4 segundos)")
|
| 1537 |
-
example_cols = st.columns(3)
|
| 1538 |
-
|
| 1539 |
-
# --- EJEMPLO 1: GOTA ---
|
| 1540 |
-
example1_text = "SÍNTOMAS: dolor intenso, hinchazón y se pone rojo el primer dedo del pie.\nALIMENTOS: carne roja, cerveza."
|
| 1541 |
-
if example_cols[0].button("Caso: Dolor en el pie por carne"):
|
| 1542 |
-
set_query_and_trigger_analysis(example1_text)
|
| 1543 |
-
|
| 1544 |
-
# --- EJEMPLO 2: INTOLERANCIA A LA LACTOSA ---
|
| 1545 |
-
example2_text = "SÍNTOMAS: muchos gases, hinchazón abdominal.\nALIMENTOS: leche, queso."
|
| 1546 |
-
if example_cols[1].button("Caso: Hinchazón por lácteos"):
|
| 1547 |
-
set_query_and_trigger_analysis(example2_text)
|
| 1548 |
-
|
| 1549 |
-
# --- EJEMPLO 3: MIGRAÑA POR VINO ---
|
| 1550 |
-
example3_text = "SÍNTOMAS: dolor de cabeza, migraña.\nALIMENTOS: vino tinto."
|
| 1551 |
-
if example_cols[2].button("Caso: Dolor de cabeza por vino"):
|
| 1552 |
-
set_query_and_trigger_analysis(example3_text)
|
| 1553 |
-
|
| 1554 |
-
# Muestra el texto del ejemplo debajo de los botones para que el usuario aprenda el formato
|
| 1555 |
-
with st.expander("Ver formato recomendado para los ejemplos"):
|
| 1556 |
-
st.code(f"Ejemplo 1 (Gota):\n{example1_text}", language='text')
|
| 1557 |
-
st.code(f"Ejemplo 2 (Lactosa):\n{example2_text}", language='text')
|
| 1558 |
-
st.code(f"Ejemplo 3 (Vino):\n{example3_text}", language='text')
|
| 1559 |
-
|
| 1560 |
-
# Definimos la variable placeholder_text justo antes de usarla en el formulario
|
| 1561 |
-
placeholder_text = """Ejemplo:
|
| 1562 |
-
SÍNTOMAS: dolor de cabeza, migraña, confusión.
|
| 1563 |
-
ALIMENTOS: vino tinto, queso curado.
|
| 1564 |
-
"""
|
| 1565 |
-
with st.form(key="search_form"):
|
| 1566 |
-
st.text_area(
|
| 1567 |
-
"Describe tu caso aquí, separando SÍNTOMAS y ALIMENTOS:",
|
| 1568 |
-
height=200,
|
| 1569 |
-
key="query",
|
| 1570 |
-
placeholder=placeholder_text
|
| 1571 |
-
)
|
| 1572 |
-
submitted = st.form_submit_button("Analizar mi caso", type="primary")
|
| 1573 |
-
if submitted:
|
| 1574 |
-
st.session_state.start_analysis = True
|
| 1575 |
-
|
| 1576 |
-
if st.session_state.start_analysis:
|
| 1577 |
-
st.session_state.start_analysis = False
|
| 1578 |
-
query_to_analyze = st.session_state.query
|
| 1579 |
|
| 1580 |
-
|
| 1581 |
-
st.session_state.user_query = query_to_analyze
|
| 1582 |
|
| 1583 |
-
|
| 1584 |
-
|
| 1585 |
-
|
| 1586 |
-
st.
|
| 1587 |
-
else:
|
| 1588 |
-
with st.spinner("🧠 Interpretando tu caso y buscando pistas..."):
|
| 1589 |
-
# Paso A: Extracción inicial
|
| 1590 |
-
initial_entities = None
|
| 1591 |
try:
|
| 1592 |
-
|
| 1593 |
-
|
| 1594 |
-
|
| 1595 |
-
|
| 1596 |
-
# Paso B: Refuerzo y normalización con el sistema de respaldo
|
| 1597 |
-
reinforced_entities = reinforce_entities_with_keywords(initial_entities, query_to_analyze, FOOD_TO_COMPOUND_MAP, MASTER_SYMPTOM_MAP)
|
| 1598 |
-
|
| 1599 |
-
# --- PASO C: TRADUCCIÓN DE SÍNTOMAS CON IA (NUEVA LÓGICA) ---
|
| 1600 |
-
final_symptoms = set(reinforced_entities.get("sintomas", []))
|
| 1601 |
-
untranslated_symptoms = reinforced_entities.get("sintomas_originales_ia", reinforced_entities.get("sintomas", []))
|
| 1602 |
|
| 1603 |
-
|
| 1604 |
-
|
| 1605 |
-
with st.spinner("🧠 Profundizando en la interpretación de los síntomas..."):
|
| 1606 |
-
translated_symptoms = translate_symptoms_with_gemini(untranslated_symptoms, MASTER_SYMPTOM_MAP)
|
| 1607 |
-
final_symptoms.update(translated_symptoms)
|
| 1608 |
-
except Exception as e:
|
| 1609 |
-
logger.error("La traducción de síntomas con IA falló después de varios intentos.")
|
| 1610 |
-
|
| 1611 |
-
# Unir todo en la entidad final
|
| 1612 |
-
final_entities = {
|
| 1613 |
-
"alimentos": reinforced_entities.get("alimentos", []),
|
| 1614 |
-
"sintomas": list(final_symptoms)
|
| 1615 |
-
}
|
| 1616 |
-
st.session_state.entities = final_entities
|
| 1617 |
-
|
| 1618 |
-
if final_entities and (final_entities.get("alimentos") or final_entities.get("sintomas")):
|
| 1619 |
-
info_str = f"Pistas identificadas - Alimentos: {', '.join(final_entities.get('alimentos',[])) or 'Ninguno'}, Síntomas: {', '.join(final_entities.get('sintomas',[])) or 'Ninguno'}"
|
| 1620 |
-
st.info(info_str)
|
| 1621 |
-
with st.spinner("🔬 Cruzando información y calculando relevancia..."):
|
| 1622 |
-
results = find_best_matches_hybrid(final_entities, alimentos_data)
|
| 1623 |
-
st.session_state.search_results = results
|
| 1624 |
-
else:
|
| 1625 |
-
st.error("No se pudieron identificar alimentos o síntomas claros en tu descripción. Intenta ser más específico.")
|
| 1626 |
-
st.session_state.search_results = []
|
| 1627 |
-
|
| 1628 |
-
if st.session_state.search_results is not None:
|
| 1629 |
-
results = st.session_state.search_results
|
| 1630 |
|
| 1631 |
-
|
| 1632 |
-
st.
|
| 1633 |
-
|
| 1634 |
-
|
| 1635 |
-
|
| 1636 |
-
|
| 1637 |
-
ai_analysis_text = st.session_state.analysis_cache.get('best_match_analysis', "")
|
| 1638 |
-
if not ai_analysis_text:
|
| 1639 |
-
with st.spinner("✍️ Generando análisis personalizado con IA..."):
|
| 1640 |
-
try:
|
| 1641 |
-
analysis_text = generate_detailed_analysis(st.session_state.user_query, best_match)
|
| 1642 |
-
st.session_state.analysis_cache['best_match_analysis'] = analysis_text
|
| 1643 |
-
ai_analysis_text = analysis_text
|
| 1644 |
-
except Exception as e:
|
| 1645 |
-
st.session_state.analysis_cache['best_match_analysis'] = "❌ No se pudo generar el análisis detallado."
|
| 1646 |
-
ai_analysis_text = st.session_state.analysis_cache['best_match_analysis']
|
| 1647 |
|
| 1648 |
-
|
| 1649 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1650 |
|
| 1651 |
-
|
| 1652 |
-
with col1:
|
| 1653 |
-
st.success(f"Hemos encontrado {len(results)} posible(s) causa(s) relacionada(s) con tu caso.")
|
| 1654 |
-
with col2:
|
| 1655 |
-
base_report_text = generate_report_text(st.session_state.user_query, results)
|
| 1656 |
-
complete_report_string = f"{base_report_text}\n\n{ai_analysis_text}\n{neuro_report_text}\n{molecular_report_text}"
|
| 1657 |
-
word_file_buffer = generate_word_report(complete_report_string)
|
| 1658 |
-
if word_file_buffer:
|
| 1659 |
-
st.download_button(
|
| 1660 |
-
label="📄 Descargar Informe (Word)",
|
| 1661 |
-
data=word_file_buffer,
|
| 1662 |
-
file_name=f"Informe_Detective_Alimentos_{datetime.now().strftime('%Y%m%d')}.docx",
|
| 1663 |
-
mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
| 1664 |
-
key="download_word_report"
|
| 1665 |
-
)
|
| 1666 |
|
| 1667 |
-
|
| 1668 |
-
|
| 1669 |
-
|
| 1670 |
-
|
| 1671 |
-
|
| 1672 |
-
|
| 1673 |
-
|
| 1674 |
-
st.markdown("##### Desglose de la Puntuación de Relevancia:")
|
| 1675 |
-
st.metric("Puntuación de Relevancia Total", f"{best_match_data['score']['total']}", delta="Máxima coincidencia")
|
| 1676 |
-
if 'matched_keywords' in best_match_data and best_match_data['matched_keywords']:
|
| 1677 |
-
st.caption(f"Pistas Clave Coincidentes: {', '.join(best_match_data['matched_keywords'])}")
|
| 1678 |
-
|
| 1679 |
-
with col2_expander:
|
| 1680 |
-
with st.popover("🔬 Componentes Moleculares"):
|
| 1681 |
-
st.markdown(molecular_report_text.replace("=", ""))
|
| 1682 |
-
|
| 1683 |
-
st.markdown("---")
|
| 1684 |
-
with st.container(border=True):
|
| 1685 |
-
st.markdown(neuro_report_text.replace("=", ""))
|
| 1686 |
-
|
| 1687 |
-
st.markdown("---")
|
| 1688 |
-
st.markdown(ai_analysis_text)
|
| 1689 |
|
| 1690 |
-
|
| 1691 |
-
|
| 1692 |
-
|
| 1693 |
-
|
| 1694 |
-
entry = result['entry']
|
| 1695 |
-
st.subheader(f"{i+2}. {entry.get('condicion_asociada')}")
|
| 1696 |
-
col_info, col_action = st.columns([3, 1])
|
| 1697 |
-
|
| 1698 |
-
with col_info:
|
| 1699 |
-
if 'matched_keywords' in result and result['matched_keywords']:
|
| 1700 |
-
st.markdown(f"**Pistas Clave Coincidentes:** {', '.join(result['matched_keywords']).capitalize()}")
|
| 1701 |
-
st.markdown(f"**Alimentos Típicos Asociados:** {entry.get('compuesto_alimento')}")
|
| 1702 |
-
|
| 1703 |
-
with col_action:
|
| 1704 |
-
st.metric("Relevancia", result['score']['total'])
|
| 1705 |
-
analysis_key = f"analysis_{i+2}"
|
| 1706 |
-
|
| 1707 |
-
if st.button("Generar análisis", key=analysis_key, help=f"Generar análisis de IA para {entry.get('condicion_asociada')}"):
|
| 1708 |
-
with st.spinner(f"Generando análisis para {entry.get('condicion_asociada')}..."):
|
| 1709 |
-
try:
|
| 1710 |
-
analysis_text = generate_detailed_analysis(st.session_state.user_query, entry)
|
| 1711 |
-
st.session_state.analysis_cache[analysis_key] = analysis_text
|
| 1712 |
-
except Exception as e:
|
| 1713 |
-
st.session_state.analysis_cache[analysis_key] = f"❌ Error al generar análisis."
|
| 1714 |
-
|
| 1715 |
-
if analysis_key in st.session_state.analysis_cache:
|
| 1716 |
-
st.info(st.session_state.analysis_cache[analysis_key])
|
| 1717 |
-
|
| 1718 |
-
if i < len(results[1:5]) - 1:
|
| 1719 |
-
st.markdown("---")
|
|
|
|
| 13 |
from tenacity import retry, stop_after_attempt, wait_random_exponential
|
| 14 |
from io import BytesIO
|
| 15 |
import docx
|
| 16 |
+
import difflib
|
| 17 |
+
|
| 18 |
st.set_page_config(page_title="El Detective de Alimentos", page_icon="🍎", layout="wide")
|
| 19 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
| 20 |
logger = logging.getLogger("food_detective_app")
|
| 21 |
+
|
| 22 |
+
# --- 1. TEXTO ESTÁTICO DE SEGURIDAD (AHORRO DE TOKENS) ---
|
| 23 |
+
TEXTO_BANDERAS_ROJAS = """
|
| 24 |
+
\n### **IMPORTANTE: Descargo de Responsabilidad y Banderas Rojas**
|
| 25 |
+
Este análisis es una herramienta informativa de IA y **NO es un diagnóstico médico.** La información proporcionada no debe sustituir la consulta con un profesional cualificado.
|
| 26 |
+
|
| 27 |
+
**🚩 BANDERAS ROJAS: ¡Atención!** Ciertas condiciones graves pueden imitar los síntomas de una intolerancia alimentaria. Es crucial que consultes a un médico para descartar problemas serios, especialmente si experimentas alguno de los siguientes escenarios:
|
| 28 |
+
|
| 29 |
+
1. **Cáncer Gástrico o de Colon:** Síntomas como plenitud rápida, pérdida de peso inexplicable, sangre en heces.
|
| 30 |
+
2. **Enfermedad Inflamatoria Intestinal:** Diarrea con sangre/moco, fiebre recurrente, dolor articular.
|
| 31 |
+
3. **Embarazo:** Náuseas matutinas, ausencia de menstruación.
|
| 32 |
+
4. **Isquemia Mesentérica:** Dolor abdominal predecible 15-30 min después de comer.
|
| 33 |
+
5. **Trastornos de la Vesícula:** Dolor agudo lado derecho tras comer grasas, ictericia.
|
| 34 |
+
|
| 35 |
+
**Si tus síntomas son severos o persistentes, la consulta médica es urgente.**
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
try:
|
| 39 |
if 'GEMINI_API_KEY' in st.secrets:
|
| 40 |
GEMINI_API_KEY = st.secrets['GEMINI_API_KEY']
|
|
|
|
| 47 |
except Exception as e:
|
| 48 |
st.error(f"❌ Error al configurar Gemini API: {e}")
|
| 49 |
st.stop()
|
| 50 |
+
|
| 51 |
@st.cache_resource
|
| 52 |
def get_gemini_model():
|
| 53 |
try:
|
|
|
|
| 56 |
st.error(f"❌ No se pudo cargar el modelo Gemini: {e}")
|
| 57 |
return None
|
| 58 |
model = get_gemini_model()
|
| 59 |
+
|
| 60 |
@st.cache_data
|
| 61 |
def load_data():
|
| 62 |
try:
|
|
|
|
| 73 |
st.error(f"Error cargando los archivos de datos: {e}")
|
| 74 |
return None, None
|
| 75 |
alimentos_data, foodb_index = load_data()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
COMPOUND_SYNONYM_MAP = {
|
| 79 |
"gluten": ["gluten", "gliadin"],
|
| 80 |
"caseína": ["casein", "casomorphin"],
|
|
|
|
| 1078 |
"fuentes_comunes": ["inflamación sistémica modulada por dieta rica en grasas/sacharosa; el pathway es endógeno pero sensible a la dieta y microbiota"]
|
| 1079 |
}
|
| 1080 |
}
|
| 1081 |
+
# --- FIN VARIABLES GRANDES ---
|
| 1082 |
+
|
| 1083 |
def generate_word_report(report_text):
    """Build a .docx report in memory and return it as a binary stream.

    Loads the Word template and swaps the <INTERPRETACION> placeholder for
    *report_text*; when the template file is missing it degrades gracefully
    to a blank document containing only the report text.

    Returns a BytesIO rewound to position 0, or None if generation fails.
    """
    try:
        template_path = os.path.join("PLANTILLAS", "PLANTILLA_INTERPRETACION.docx")
        if not os.path.exists(template_path):
            # Fallback: no template on disk, emit a plain document.
            doc = docx.Document()
            doc.add_paragraph(report_text)
        else:
            doc = docx.Document(template_path)
            # NOTE(review): only top-level paragraphs are scanned; a
            # placeholder inside a table cell would be missed — confirm
            # against the actual template layout.
            for paragraph in doc.paragraphs:
                if '<INTERPRETACION>' in paragraph.text:
                    paragraph.clear()
                    paragraph.add_run(report_text)
        buffer = BytesIO()
        doc.save(buffer)
        buffer.seek(0)
        return buffer
    except Exception as e:
        logger.error(f"Error al generar el informe de Word: {e}")
        return None
|
|
|
|
| 1104 |
if not text: return ""
|
| 1105 |
return re.sub(r'[.,;()]', '', text).lower().strip()
|
| 1106 |
|
| 1107 |
+
# --- FUNCIONES DE EXTRACCIÓN Y LÓGICA (OPTIMIZADAS) ---
|
| 1108 |
|
| 1109 |
+
@retry(wait=wait_random_exponential(min=1, max=10), stop=stop_after_attempt(3))
def extract_entities_with_gemini(query):
    """Ask Gemini to extract foods and symptoms from the user's free text.

    Returns the parsed dict (expected keys: "alimentos", "sintomas"), or
    None when the model is unavailable.  Raises on API/parse errors so the
    tenacity decorator can retry (up to 3 attempts, exponential backoff).
    """
    if not model:
        return None
    # Deliberately terse prompt to keep token usage low.
    system_prompt = f"""
Rol: Extractor de Entidades Médicas.
Tarea: Analiza el texto y extrae JSON estricto.
Campos:
1. "alimentos": Lista de comidas/ingredientes.
2. "sintomas": Lista de sensaciones/signos físicos.
Input: "{query}"
Output (JSON Only):
"""
    try:
        raw = model.generate_content(system_prompt).text
        # Strip Markdown code fences the model sometimes wraps around JSON.
        if "```json" in raw:
            raw = raw.split("```json")[1].split("```")[0]
        elif "```" in raw:
            raw = raw.split("```")[1].split("```")[0]
        return json.loads(raw)
    except Exception as e:
        logger.error(f"Error en extracción: {e}")
        raise e
|
| 1132 |
|
| 1133 |
def reinforce_entities_with_keywords(entities, query, food_map, master_symptom_map):
    """Augment AI-extracted entities with deterministic keyword matching.

    Mutates and returns *entities*: adds any *food_map* key found verbatim in
    the sanitized query, and any clinical term from *master_symptom_map*
    whose Spanish phrases ("frases_es") occur in the query.  Runs entirely
    locally — no model calls.
    """
    if not entities:
        entities = {"alimentos": [], "sintomas": []}
    normalized_query = sanitize_text(query)

    # Foods: a food-map key matches when it appears as a substring.
    foods = set(entities.get("alimentos", []))
    foods.update(key for key in food_map if key in normalized_query)
    entities["alimentos"] = list(foods)

    # Symptoms: a clinical term matches when any of its phrases occurs.
    symptoms = set(entities.get("sintomas", []))
    for term, details in master_symptom_map.items():
        if any(phrase in normalized_query for phrase in details.get("frases_es", [])):
            symptoms.add(term)
    entities["sintomas"] = list(symptoms)
    return entities
|
| 1149 |
+
|
| 1150 |
+
def translate_symptoms_local(symptoms_list, master_symptom_map):
    """Map free-form symptom strings onto canonical clinical terms (0 tokens).

    Per symptom, lookup order is: exact (case-insensitive, trimmed) hit on a
    clinical term or one of its Spanish phrases, then a fuzzy difflib match
    (cutoff 0.7).  Unmatched symptoms pass through unchanged.  Returns a
    de-duplicated list.
    """
    if not symptoms_list:
        return []

    # Build a phrase -> canonical-term index once for all lookups.
    lookup = {}
    for term, details in master_symptom_map.items():
        lookup[term.lower()] = term
        for phrase in details.get("frases_es", []):
            lookup[phrase.lower()] = term
    candidates = list(lookup)

    resolved = set()
    for raw_symptom in symptoms_list:
        key = raw_symptom.lower().strip()
        if key in lookup:
            resolved.add(lookup[key])
            continue
        close = difflib.get_close_matches(key, candidates, n=1, cutoff=0.7)
        resolved.add(lookup[close[0]] if close else raw_symptom)
    return list(resolved)
|
| 1170 |
+
|
| 1171 |
def find_best_matches_hybrid(entities, data):
    """Rank condition entries by keyword overlap with the user's entities.

    Tokenizes the user's symptoms/foods and each entry's trigger text into
    word sets, scores the overlap quadratically (|shared|^2 * 10), keeps
    entries sharing at least two keywords, and returns them best-first as
    {'entry', 'score': {'total'}, 'matched_keywords'} dicts.
    """
    if not entities or not data:
        return []

    def tokenize(text):
        # Bag of word-level keywords over sanitized text.
        return set(re.findall(r'\b\w+\b', sanitize_text(text)))

    user_tokens = tokenize(
        " ".join(entities.get("sintomas", [])) + " " + " ".join(entities.get("alimentos", []))
    )

    scored = []
    for entry in data:
        entry_tokens = tokenize(
            entry.get("compuesto_alimento", "") + " " + " ".join(entry.get("sintomas_clave", []))
        )
        shared = user_tokens & entry_tokens
        if not shared:
            continue
        score = (len(shared) ** 2) * 10
        # score > 10 <=> at least two shared keywords (one keyword scores 10).
        if score > 10:
            scored.append({'entry': entry, 'score': {'total': score}, 'matched_keywords': list(shared)})
    return sorted(scored, key=lambda item: item['score']['total'], reverse=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1188 |
|
| 1189 |
def find_best_foodb_matches(user_foods_es, foodb_index_keys, food_name_map, limit=3):
    """Resolve Spanish food names to up to *limit* FoodB index keys.

    Spanish mentions are mapped to English search terms via *food_name_map*
    (substring match on the Spanish key), then fuzzy-matched against the
    FoodB index with difflib (cutoff 0.6).

    Returns a de-duplicated list of index keys, possibly empty.
    """
    if not user_foods_es:
        return []

    # Translate ES food mentions into EN search terms via substring lookup.
    search_terms_en = set()
    for food_es in user_foods_es:
        for key_es, value_en_list in food_name_map.items():
            if key_es in food_es.lower():
                search_terms_en.update(value_en_list)
    if not search_terms_en:
        return []

    # Materialize once: foodb_index_keys may be a dict key view.
    candidate_keys = list(foodb_index_keys)
    found_matches = []
    # sorted() gives a deterministic iteration order over the term set.
    for term in sorted(search_terms_en):
        found_matches.extend(difflib.get_close_matches(term, candidate_keys, n=limit, cutoff=0.6))
    # BUGFIX: the original used list(set(...))[:limit]; set ordering is
    # hash-randomized, so which matches survived the truncation varied
    # between runs.  dict.fromkeys de-duplicates while preserving order.
    return list(dict.fromkeys(found_matches))[:limit]
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1201 |
|
| 1202 |
+
# --- GENERADORES DE REPORTES (RESTAURADOS COMPLETOS) ---
|
|
|
|
|
|
|
|
|
|
| 1203 |
|
| 1204 |
+
@retry(wait=wait_random_exponential(min=1, max=10), stop=stop_after_attempt(3))
def generate_detailed_analysis(query, match):
    """Produce the Markdown clinical analysis for one matched condition.

    Builds a compact prompt from the user's case plus the matched entry,
    calls Gemini, and appends the locally stored red-flags disclaimer
    (TEXTO_BANDERAS_ROJAS) so the model never spends tokens generating it.
    Raises on API errors so the tenacity decorator can retry.
    """
    if not model:
        return "Error: IA no disponible."
    prompt = "\n".join([
        "Rol: Nutricionista Funcional.",
        f"Caso: {query}",
        f"Hipótesis: {match.get('condicion_asociada')}",
        f"Mecanismo: {match.get('mecanismo_fisiologico')}",
        f"Alimentos clave: {match.get('compuesto_alimento')}",
        "Tarea: Escribir análisis en Markdown.",
        "Estructura:",
        "1. Saludo y posible causa.",
        "2. Explicación del mecanismo.",
        "3. Alimentos a evitar.",
        "4. Reemplazos sugeridos.",
        "5. Consejo práctico.",
        "IMPORTANTE: NO incluyas descargos de responsabilidad.",
    ])
    try:
        response = model.generate_content(prompt)
        # Disclaimer appended locally — zero extra model tokens.
        return response.text + TEXTO_BANDERAS_ROJAS
    except Exception as e:
        logger.error(f"Error análisis: {e}")
        raise e
|
| 1229 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1230 |
def generate_neuro_report_text(entities, food_map, neuro_map):
    """Build the neuropsychological-effects section from local maps (0 tokens).

    Foods are expanded to compounds via *food_map*; each compound with an
    entry in *neuro_map* contributes a short effect blurb.  When nothing
    matches, a fallback sentence is emitted instead.
    """
    lines = ["\n### 🧠 Efectos Neuropsicológicos Posibles"]

    # Expand the user's foods into their known compounds.
    compounds = set()
    for food in entities.get("alimentos", []):
        if food in food_map:
            compounds.update(food_map[food])

    matched = False
    for compound in compounds:
        if compound in neuro_map:
            matched = True
            lines.append(f"**Componente: {compound.capitalize()}**")
            lines.append(f"_{neuro_map[compound]['efecto_neuropsicologico']}_\n")

    if not matched:
        lines.append("No se detectaron efectos neuropsicológicos específicos en nuestra base de datos para estos alimentos.")
    return "\n".join(lines)
|
| 1251 |
|
| 1252 |
def generate_molecular_report_text(best_match, entities, foodb_index, food_name_map, synonym_map, triggers_map):
|
| 1253 |
+
"""Genera reporte molecular detallado usando FoodB local (Costo 0 tokens)."""
|
| 1254 |
+
report_lines = ["\n### 🔬 Análisis Molecular (FoodB)"]
|
|
|
|
|
|
|
| 1255 |
user_foods_mentioned = entities.get("alimentos", [])
|
| 1256 |
|
| 1257 |
if not user_foods_mentioned:
|
| 1258 |
+
return "No se identificaron alimentos específicos para el análisis molecular."
|
|
|
|
| 1259 |
|
| 1260 |
initial_clues = set()
|
| 1261 |
direct_text = best_match.get("compuesto_alimento", "").lower()
|
|
|
|
| 1270 |
final_search_keywords = set()
|
| 1271 |
for clue in initial_clues:
|
| 1272 |
final_search_keywords.add(clue)
|
| 1273 |
+
if clue in synonym_map: final_search_keywords.update(synonym_map[clue])
|
|
|
|
| 1274 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1275 |
best_food_matches = find_best_foodb_matches(user_foods_mentioned, foodb_index.keys(), food_name_map)
|
| 1276 |
|
| 1277 |
if not best_food_matches:
|
| 1278 |
+
return "No se encontraron datos moleculares detallados para los alimentos mencionados."
|
|
|
|
| 1279 |
|
| 1280 |
found_any_data = False
|
| 1281 |
for food_key in best_food_matches:
|
|
|
|
| 1287 |
|
| 1288 |
if relevant_compounds:
|
| 1289 |
found_any_data = True
|
| 1290 |
+
report_lines.append(f"\n**Alimento Analizado: {food_key.capitalize()}**")
|
| 1291 |
+
unique_compounds = set()
|
| 1292 |
for item in relevant_compounds:
|
| 1293 |
+
if item['compound'] not in unique_compounds:
|
| 1294 |
+
report_lines.append(f"- Compuesto: `{item['compound']}` (Vínculo potencial con {best_match.get('condicion_asociada')})")
|
| 1295 |
+
unique_compounds.add(item['compound'])
|
|
|
|
| 1296 |
|
| 1297 |
if not found_any_data:
|
| 1298 |
+
return f"No se encontraron los compuestos moleculares específicos de esta condición en los alimentos analizados."
|
| 1299 |
|
| 1300 |
return "\n".join(report_lines)
|
| 1301 |
|
| 1302 |
+
def create_relevance_chart(results):
    """Horizontal bar chart of the ten highest-scoring condition matches.

    Parenthesized qualifiers are stripped from condition names to keep the
    axis labels short.  Returns an Altair chart for st.altair_chart.
    """
    top = results[:10]
    labels = [re.sub(r'\(.*\)', '', r['entry']['condicion_asociada']).strip() for r in top]
    scores = [r['score']['total'] for r in top]
    source = pd.DataFrame({"Condición": labels, "Relevancia": scores})
    return (
        alt.Chart(source)
        .mark_bar()
        .encode(
            x='Relevancia',
            y=alt.Y('Condición', sort='-x'),
            tooltip=['Condición', 'Relevancia'],
        )
        .properties(title='Top Coincidencias')
    )
|
| 1314 |
|
| 1315 |
+
def generate_word_report(report_text):
    """Pack *report_text* into a minimal in-memory .docx.

    Deliberately skips the on-disk template so the app runs without one.
    NOTE(review): this redefines generate_word_report — it shadows the
    template-based version declared earlier in the file, so this simplified
    implementation is the one callers actually get.

    Returns a BytesIO rewound to position 0.
    """
    document = docx.Document()
    document.add_paragraph(report_text)
    output = BytesIO()
    document.save(output)
    output.seek(0)
    return output
|
| 1323 |
+
|
| 1324 |
+
# --- USER INTERFACE ---
# Header: logo / title / mascot in a 1:4:1 column layout.  Images are
# optional — rendered only when the files exist next to app.py.
col_img1, col_text, col_img2 = st.columns([1, 4, 1])
with col_img1:
    if os.path.exists("imagen.png"): st.image("imagen.png", width=150)
with col_text:
    st.title("El Detective de Alimentos")
    st.markdown("##### Describe tus SÍNTOMAS y los ALIMENTOS que sospechas.")
with col_img2:
    if os.path.exists("buho.png"): st.image("buho.png", width=120)

# Session-state defaults: results/entities survive Streamlit reruns so the
# tabs and download button keep working after widget interactions.
if 'search_results' not in st.session_state: st.session_state.search_results = None
if 'user_query' not in st.session_state: st.session_state.user_query = ""
if 'entities' not in st.session_state: st.session_state.entities = None
if 'analysis_cache' not in st.session_state: st.session_state.analysis_cache = {}

# Input form: submission triggers exactly one rerun with submitted=True.
with st.form(key="search_form"):
    query = st.text_area("Tu Caso:", height=150, placeholder="Ej: Me duele la cabeza y me siento hinchado cuando como queso y tomo vino.")
    submitted = st.form_submit_button("Analizar Caso", type="primary")

if submitted and query:
    # Reset prior state so stale results/analyses never leak into this run.
    st.session_state.user_query = query
    st.session_state.search_results = None
    st.session_state.analysis_cache = {}

    with st.spinner("🔍 Analizando pistas con IA y Bases de Datos..."):
        # 1. Entity extraction (token-optimized Gemini call).
        # NOTE(review): bare except silently degrades to empty entities on
        # ANY failure (including bugs) — consider narrowing the exception.
        try:
            raw_entities = extract_entities_with_gemini(query)
        except:
            raw_entities = {"alimentos": [], "sintomas": []}

        # 2. Local keyword reinforcement + symptom canonicalization (0 tokens).
        reinforced = reinforce_entities_with_keywords(raw_entities, query, FOOD_TO_COMPOUND_MAP, MASTER_SYMPTOM_MAP)
        final_symptoms = translate_symptoms_local(reinforced.get("sintomas", []), MASTER_SYMPTOM_MAP)
        final_entities = {"alimentos": reinforced.get("alimentos", []), "sintomas": final_symptoms}
        st.session_state.entities = final_entities

        # 3. Local keyword-overlap search against the conditions dataset.
        results = find_best_matches_hybrid(final_entities, alimentos_data)
        st.session_state.search_results = results

if st.session_state.search_results:
    # Results persist across reruns; best match is first (list is pre-sorted).
    results = st.session_state.search_results
    best_match = results[0]['entry']

    st.success(f"🔎 Coincidencia Principal: **{best_match.get('condicion_asociada')}**")

    # AI analysis is generated once per condition and cached in session state.
    cache_key = f"analysis_{best_match.get('condicion_asociada')}"
    if cache_key not in st.session_state.analysis_cache:
        with st.spinner("✍️ Redactando informe clínico..."):
            # NOTE(review): bare except hides the failure cause from logs.
            try:
                analysis = generate_detailed_analysis(st.session_state.user_query, best_match)
                st.session_state.analysis_cache[cache_key] = analysis
            except:
                st.session_state.analysis_cache[cache_key] = "No se pudo generar el análisis detallado."

    # --- TABBED DISPLAY: clinical / neuro / molecular views ---
    tab_main, tab_neuro, tab_mol = st.tabs(["💡 Interpretación Clínica", "🧠 Efectos Neuropsicológicos", "🔬 Análisis Molecular"])

    with tab_main:
        st.markdown(st.session_state.analysis_cache[cache_key])
        st.markdown("---")
        st.caption("Gráfico de otras posibles causas:")
        st.altair_chart(create_relevance_chart(results), use_container_width=True)

    with tab_neuro:
        st.info("Este análisis se basa en la interacción conocida entre nutrientes y neurotransmisores.")
        neuro_text = generate_neuro_report_text(st.session_state.entities, FOOD_TO_COMPOUND_MAP, INTEGRATED_NEURO_FOOD_MAP)
        st.markdown(neuro_text)

    with tab_mol:
        st.info("Desglose químico basado en la base de datos FoodB.")
        mol_text = generate_molecular_report_text(best_match, st.session_state.entities, foodb_index, FOOD_NAME_TO_FOODB_KEY, COMPOUND_SYNONYM_MAP, KNOWN_TRIGGERS_MAP)
        st.markdown(mol_text)

    # Download: all three report sections combined into one Word document.
    # (neuro_text/mol_text are bound above inside the `with` blocks, which
    # do not create a new scope, so they are visible here.)
    full_report = f"REPORTE CLÍNICO\n\n{st.session_state.analysis_cache[cache_key]}\n\n{neuro_text}\n\n{mol_text}"
    word_data = generate_word_report(full_report)
    st.download_button("📄 Descargar Informe Completo (Word)", data=word_data, file_name="Reporte_Detective.docx", mime="application/vnd.openxmlformats-officedocument.wordprocessingml.document")

elif submitted:
    # Submitted but no match found: show a structured help panel instead of
    # a bare warning so the user can reformulate the query.
    with st.container(border=True):
        st.warning("⚠️ No pudimos identificar una causa clara con la información proporcionada.")

        col_help, col_tips = st.columns([1, 2])

        with col_help:
            st.markdown("### ¿Qué pudo pasar?")
            st.markdown("""
            - **Descripción muy breve:** La IA necesita contexto.
            - **Sinónimos desconocidos:** Usaste términos muy coloquiales.
            - **Fallo de conexión:** La IA no respondió a tiempo.
            """)

        with col_tips:
            st.info("💡 **Intenta reformular tu consulta así:**")
            st.code("Siento [SÍNTOMA] y [SÍNTOMA] después de comer [ALIMENTO].", language="text")
            st.markdown("**Ejemplo:** _Me duele mucho la cabeza tipo migraña cada vez que como queso curado y tomo vino tinto._")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|