# Hugging Face Space: "Asistente de Salud Rural" — Streamlit demo app.
# --- Standard library ---
import base64
import io
import json
import os
import tempfile
import time
import uuid
from datetime import datetime, timedelta

# --- Third-party ---
import pandas as pd
import streamlit as st
import torch
from PIL import Image
from huggingface_hub import login  # required by setup_hf_authentication()
from reportlab.lib import colors
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table, TableStyle
from transformers import AutoProcessor, AutoModelForImageTextToText
# --- Application Configuration ---
MODEL_ID = "google/gemma-3n-E4B-it"  # Hugging Face model id (gated; terms must be accepted)

# Configure the page. NOTE(review): Streamlit requires set_page_config to be
# the first st.* call in the script — keep it before any other UI call.
st.set_page_config(
    page_title="Asistente de Salud Rural",
    page_icon="🏥",
    layout="wide"
)
# --- Authentication Setup ---
def setup_hf_authentication():
    """Authenticate this session against the Hugging Face Hub.

    The token is read from Streamlit secrets (``HF_TOKEN``) first; if the
    secrets file or key is absent, it falls back to the ``HF_TOKEN``
    environment variable. Outcome is reported in the Streamlit UI.

    Returns:
        bool: True when a token was found and ``login()`` succeeded,
        False otherwise (missing token or any login error).
    """
    try:
        hf_token = None
        try:
            # Try Streamlit secrets first. Accessing st.secrets raises when
            # no secrets file/key exists; the bare `except:` used previously
            # also swallowed SystemExit/KeyboardInterrupt — narrowed here.
            hf_token = st.secrets["HF_TOKEN"]
        except Exception:
            # Fallback to environment variable
            hf_token = os.getenv("HF_TOKEN")
        if hf_token:
            # `login` is provided by huggingface_hub (see imports at top of file).
            login(token=hf_token, add_to_git_credential=False)
            st.success("✅ Autenticación con HuggingFace exitosa")
            return True
        st.error("❗ Token de Hugging Face no encontrado")
        st.info("Configura HF_TOKEN en los secretos del Space o como variable de entorno")
        return False
    except Exception as e:
        st.error(f"Error en autenticación HF: {e}")
        return False
# --- Hugging Face Model Loading ---
def load_hf_model_and_processor():
    """Load the Gemma processor and model from the Hugging Face Hub.

    Authenticates first via setup_hf_authentication(); every failure path
    returns (None, None) and surfaces guidance in the Streamlit UI.

    Returns:
        tuple: (processor, model) on success, (None, None) on failure.
    """
    try:
        # First, authenticate — the Gemma repo is gated.
        if not setup_hf_authentication():
            return None, None
        processor = AutoProcessor.from_pretrained(MODEL_ID)
        # Gemma 3n is a multimodal (image-text-to-text) model, hence
        # AutoModelForImageTextToText rather than AutoModelForCausalLM.
        # NOTE(review): passing `load_in_4bit=True` directly is deprecated in
        # recent transformers releases in favour of a BitsAndBytesConfig via
        # `quantization_config=` and requires bitsandbytes — confirm against
        # the installed transformers version.
        model = AutoModelForImageTextToText.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.bfloat16,
            load_in_4bit=True,
            device_map="auto"  # let accelerate place weights automatically
        )
        st.success(f"✅ Modelo {MODEL_ID} cargado exitosamente")
        return processor, model
    except Exception as e:
        st.error(f"Error al cargar el modelo de Hugging Face: {e}")
        st.error("Posibles causas:")
        st.error("1. Token de HF inválido o sin permisos para Gemma")
        st.error("2. No has aceptado los términos del modelo Gemma en HuggingFace")
        st.error("3. Problemas de red o caché")
        st.info("Solución: Ve a https://huggingface.co/google/gemma-3n-E4B-it y acepta los términos")
        return None, None

# Loaded once at import time; both are None on failure (call_gemma_hf_api checks).
processor, model = load_hf_model_and_processor()
# --- File loading for flat directory structure ---
def load_herbal_medicines():
    """Load the herbal-medicine database from 'plantas_medicinales.txt'.

    The file lives in the app root and contains JSON despite its .txt
    extension. Falls back to a minimal built-in entry when the file is
    missing — and now also when it contains invalid JSON, which previously
    crashed the app at startup.

    Returns:
        dict: plant key -> record with name/uses/preparation/contraindications.
    """
    herbal_file = 'plantas_medicinales.txt'
    default_herbs = {'manzanilla': {'name': 'Manzanilla', 'uses': ['Calmante'], 'preparation': 'Té', 'contraindications': ['Embarazo']}}
    try:
        # EAFP: one open attempt instead of an exists()+open race.
        with open(herbal_file, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return default_herbs
def load_generic_medicines():
    """Load the generic-medicine database from 'medicamentos_genericos.txt'.

    JSON content in a .txt file at the app root. Falls back to a minimal
    built-in entry when the file is missing — and now also when it contains
    invalid JSON, which previously crashed the app at startup. Mirrors
    load_herbal_medicines() for consistency.

    Returns:
        dict: medicine key -> record with name/uses.
    """
    generic_file = 'medicamentos_genericos.txt'
    default_generics = {'paracetamol': {'name': 'Paracetamol', 'uses': ['Dolor', 'Fiebre']}}
    try:
        # EAFP: one open attempt instead of an exists()+open race.
        with open(generic_file, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return default_generics
# --- Session State Initialization ---
def initialize_session_state():
    """Seed per-session defaults in st.session_state (idempotent).

    Plain defaults come from a table; the two databases are loaded lazily
    so the files are only read when the key is genuinely absent.
    """
    simple_defaults = {
        'user_initialized': False,
        'user_profile': {},
        'medical_history': [],
        'chat_messages': [],
    }
    for state_key, default_value in simple_defaults.items():
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
    if 'herbal_database' not in st.session_state:
        st.session_state['herbal_database'] = load_herbal_medicines()
    if 'generic_database' not in st.session_state:
        st.session_state['generic_database'] = load_generic_medicines()
# --- User Profile Creation ---
def show_user_profile_popup():
    """Render the one-time profile form for this session.

    On submit: stores the profile dict in st.session_state.user_profile,
    flips user_initialized to True, and reruns the script so the main UI
    (gated on that flag) appears.
    """
    with st.container():
        st.markdown("### 👤 Configuración del Perfil de Usuario")
        st.info("Para brindarte la mejor atención médica, necesitamos algunos datos básicos para esta sesión.")
        with st.form("user_profile_form"):
            col1, col2 = st.columns(2)
            with col1:
                name = st.text_input("Nombre completo *", value="Usuario de Prueba")
                age = st.number_input("Edad *", min_value=0, max_value=120, value=30)
                location = st.text_input("Ubicación (Ciudad, País) *", value="Comunidad Rural")
            with col2:
                chronic_conditions = st.text_area("Condiciones médicas crónicas (separadas por comas)", placeholder="Ej: Diabetes, Hipertensión")
                allergies = st.text_area("Alergias conocidas (separadas por comas)", placeholder="Ej: Penicilina, Mariscos")
            if st.form_submit_button("✅ Guardar Perfil para esta Sesión", type="primary"):
                # Comma-separated free text -> cleaned lists (blanks dropped);
                # age is stored as a string.
                profile_data = {
                    'name': name, 'age': str(age), 'location': location,
                    'chronic_conditions': [c.strip() for c in chronic_conditions.split(',') if c.strip()],
                    'allergies': [a.strip() for a in allergies.split(',') if a.strip()],
                    'current_medications': []
                }
                st.session_state.user_profile = profile_data
                st.session_state.user_initialized = True
                st.success("¡Perfil guardado para esta sesión!")
                st.rerun()
# --- System Prompts ---
def get_system_prompts():
    """Build the system prompt(s) for the assistant.

    Embeds the session's user profile and the last three medical-history
    entries as JSON so the model has per-user context.

    Returns:
        dict: prompt name -> prompt text (currently only 'health_assistant').
    """
    profile = st.session_state.get('user_profile', {})
    history = st.session_state.get('medical_history', [])
    profile_json = json.dumps(profile, ensure_ascii=False)
    history_json = json.dumps(history[-3:], ensure_ascii=False)
    health_assistant_prompt = f"""Eres un asistente de salud especializado para comunidades rurales de Sudamérica.
Contexto del Usuario: {profile_json}
Historial médico de esta sesión: {history_json}
Tu rol es evaluar, proporcionar orientación básica, recomendar tratamientos accesibles, y NUNCA dar un diagnóstico definitivo. Usa español simple y sé empático."""
    return {'health_assistant': health_assistant_prompt}
# --- AI API Call using Transformers ---
def call_gemma_hf_api(prompt_text, image_bytes=None, system_prompt=""):
    """Call the local Gemma model with text and an optional image.

    Args:
        prompt_text (str): The user's message.
        image_bytes (bytes | None): Optional raw image bytes (PNG/JPEG),
            decoded with PIL before being passed to the processor.
        system_prompt (str): Optional system message prepended to the chat.

    Returns:
        dict: {"success": True, "text": str, "response_time": float seconds}
        on success, or {"success": False, "error": str} on failure —
        including when the module-level processor/model failed to load.
    """
    if not processor or not model:
        return {"success": False, "error": "El modelo no está cargado."}
    try:
        # Build the chat in the structure the processor's chat template expects.
        messages = []
        if system_prompt:
            messages.append({
                "role": "system",
                "content": system_prompt
            })
        # Build user content; the image (if any) goes before the text,
        # following the HF multimodal example for this model family.
        user_content = []
        if image_bytes:
            # Convert raw bytes to a PIL Image for the processor.
            pil_image = Image.open(io.BytesIO(image_bytes))
            user_content.append({"type": "image", "image": pil_image})
        user_content.append({"type": "text", "text": prompt_text})
        messages.append({
            "role": "user",
            "content": user_content
        })
        # return_dict=True yields a BatchEncoding we can unpack into generate().
        inputs = processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(model.device)
        start_time = time.time()
        outputs = model.generate(**inputs, max_new_tokens=800)
        response_time = time.time() - start_time
        # Decode only the newly generated tokens, skipping the prompt echo.
        response_text = processor.decode(
            outputs[0][inputs["input_ids"].shape[-1]:],
            skip_special_tokens=True
        )
        return {"success": True, "text": response_text, "response_time": response_time}
    except Exception as e:
        return {"success": False, "error": f"Error en la API de Hugging Face: {str(e)}"}
# --- Main Application Logic ---
initialize_session_state()

# Gate the whole app behind the profile form until it has been submitted.
if not st.session_state.user_initialized:
    show_user_profile_popup()
    st.stop()

st.title("🏥 Asistente de Salud Rural")
st.markdown("Atención médica básica para comunidades rurales de Sudamérica (Demo en Hugging Face)")
user_profile = st.session_state.user_profile
st.info(f"👤 Usuario: {user_profile.get('name', 'N/A')} | 📍 {user_profile.get('location', 'N/A')}")
tab1, tab2, tab3 = st.tabs(["🩺 Evaluación Médica", "🌿 Plantas Medicinales", "📋 Historial de Sesión"])
with tab1:
    # Symptom-evaluation chat: free text plus an optional image, sent to Gemma.
    st.header("🩺 Evaluación Médica Avanzada")
    st.markdown("Chat interactivo para evaluación de síntomas. Los datos se borrarán al cerrar la ventana.")
    chat_container = st.container(height=400)
    with chat_container:
        # Replay the conversation kept in session state.
        for message in st.session_state.chat_messages:
            with st.chat_message(message["role"]):
                st.write(message["content"])
                if "image" in message and message["image"]:
                    st.image(message["image"], width=200)
    user_message = st.text_area("Describe tus síntomas o preguntas:", key="chat_input")
    uploaded_image = st.file_uploader("Subir imagen médica (opcional):", type=['png', 'jpg', 'jpeg'])
    if st.button("💬 Enviar Consulta", type="primary"):
        # Empty/whitespace-only messages are silently ignored.
        if user_message.strip():
            image_bytes = None
            if uploaded_image:
                image_bytes = uploaded_image.getvalue()
            st.session_state.chat_messages.append({"role": "user", "content": user_message, "image": image_bytes})
            with st.spinner("🤖 El asistente Gemma está pensando..."):
                system_prompts = get_system_prompts()
                result = call_gemma_hf_api(user_message, image_bytes, system_prompts['health_assistant'])
                if result["success"]:
                    response_text = result["text"]
                    st.session_state.chat_messages.append({"role": "assistant", "content": response_text})
                    # Mirror the exchange into the session medical history (used
                    # by the history tab and as context in the system prompt).
                    st.session_state.medical_history.append({
                        'timestamp': datetime.now().isoformat(),
                        'user_input': user_message,
                        'ai_response': response_text
                    })
                    st.rerun()
                else:
                    st.error(f"Error del modelo: {result['error']}")
with tab2:
    # Searchable catalogue of the herbal-medicine database.
    st.header("🌿 Base de Datos de Plantas Medicinales")
    herbal_db = st.session_state.herbal_database
    search_term = st.text_input("🔍 Buscar por nombre o uso:", placeholder="Ej: dolor de cabeza, manzanilla...")
    needle = search_term.lower()  # hoisted: lower once, not per plant
    for plant_key, plant_data in herbal_db.items():
        # FIX: the lowered search term was previously matched against the
        # unmodified uses list, so capitalised uses (e.g. 'Calmante') could
        # never match a lowercase search — lower both sides.
        uses_text = ' '.join(plant_data['uses']).lower()
        if not search_term or needle in plant_data['name'].lower() or needle in uses_text:
            with st.expander(f"🌱 {plant_data['name']}"):
                st.markdown(f"**Usos:** {', '.join(plant_data['uses'])}")
                st.markdown(f"**Preparación:** {plant_data.get('preparation', 'No especificado')}")
                st.markdown(f"**⚠️ Contraindicaciones:** {', '.join(plant_data.get('contraindications', ['Ninguna conocida']))}")
with tab3:
    # Read-only view of this session's consultations, newest first.
    st.header("📋 Historial de esta Sesión")
    if st.session_state.medical_history:
        for entry in reversed(st.session_state.medical_history):
            with st.expander(f"Consulta - {datetime.fromisoformat(entry['timestamp']).strftime('%H:%M:%S')}"):
                st.markdown("**Tu consulta:**")
                st.info(entry['user_input'])
                st.markdown("**Respuesta del Asistente:**")
                st.success(entry['ai_response'])
    else:
        st.info("No hay consultas en esta sesión todavía.")

st.markdown("---")
# Permanent disclaimer shown at the bottom of every page.
st.warning("**Descargo de responsabilidad:** Este asistente es una DEMO y no reemplaza la consulta médica profesional. En caso de emergencia, busque atención médica inmediata.")