# Hugging Face Spaces page header (captured status: Runtime error — see notes below)
| import gradio as gr | |
| import spacy | |
| from faster_whisper import WhisperModel | |
| from gtts import gTTS | |
| from transformers import pipeline | |
| import os | |
| import sys | |
| from tools.asesoramiento_tool import asesorar_consulta | |
| from tools.triaje_tool import triaje_primer_tool | |
# --- Load the specialised scientific/medical language model -----------------
# en_core_sci_md is a scispacy model: it is NOT in spaCy's own model
# registry, so `python -m spacy download en_core_sci_md` fails. The original
# code also ignored the os.system() exit code, so the failed download was
# silent and spacy.load() crashed at startup. Install the scispacy release
# tarball with pip instead, and fail loudly (check=True) if that breaks.
import subprocess

model_dir = os.path.join(os.path.dirname(__file__), "models")
os.makedirs(model_dir, exist_ok=True)
model_path = os.path.join(model_dir, "en_core_sci_md")

if not os.path.exists(model_path):
    scispacy_url = (
        "https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/"
        "releases/v0.5.4/en_core_sci_md-0.5.4.tar.gz"
    )
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "--target", model_dir, scispacy_url],
        check=True,
    )


def _find_model_data(pkg_dir):
    """Return the directory under *pkg_dir* that holds meta.json.

    pip lays model data out as <pkg>/<pkg>-<version>/, so passing the
    package directory itself to spacy.load() would fail.
    """
    if os.path.exists(os.path.join(pkg_dir, "meta.json")):
        return pkg_dir
    for entry in sorted(os.listdir(pkg_dir)):
        candidate = os.path.join(pkg_dir, entry)
        if os.path.isdir(candidate) and os.path.exists(
            os.path.join(candidate, "meta.json")
        ):
            return candidate
    # Fall back to the package dir; spacy.load will raise a clear error.
    return pkg_dir


nlp = spacy.load(_find_model_data(model_path))
# Load the speech-to-text and text-generation models at import time.
# whisper "tiny" with int8 on CPU is the smallest/cheapest configuration.
# NOTE(review): Mixtral-8x7B-Instruct is an extremely large model; loading
# it eagerly via a default pipeline() call needs far more RAM/VRAM than a
# typical CPU Space provides and is a plausible cause of the captured
# "Runtime error" — confirm the hardware, or switch to a smaller instruct
# model or the hosted Inference API.
whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")
health_nlp = pipeline("text-generation", model="mistralai/Mixtral-8x7B-Instruct-v0.1")
def speech_to_text(audio_path):
    """Transcribe a Spanish audio file with faster-whisper.

    Returns the transcription as a single string, or an error message
    (in Spanish) if transcription fails for any reason.
    """
    try:
        # Segments are produced lazily, so both the call and the join must
        # stay inside the try: decoding errors surface during iteration.
        segments, _ = whisper_model.transcribe(audio_path, language="es")
        return " ".join(seg.text for seg in segments)
    except Exception as exc:
        return f"Error en la transcripción: {str(exc)}"
def text_to_speech(text):
    """Synthesise Spanish speech for *text* with gTTS.

    Writes the audio to "response.mp3" and returns that path; returns None
    when synthesis fails (best-effort — the caller degrades to text only).
    """
    audio_file = "response.mp3"
    try:
        gTTS(text, lang="es").save(audio_file)
    except Exception:
        return None
    return audio_file
def process_input(audio, mode="asesoramiento"):
    """Answer a spoken query in the selected mode.

    Transcribes *audio* (a file path), routes the text through the chosen
    mode ("asesoramiento", "triaje" or "mistral"), and returns a pair of
    (audio response path or None, display text with question and answer).
    """
    # Guard clause: nothing recorded yet.
    if audio is None:
        return None, "Por favor, graba algo."

    query = speech_to_text(audio)

    if mode == "asesoramiento":
        respuesta = asesorar_consulta(query)["respuesta"]
    elif mode == "triaje":
        parsed = nlp(query.lower())
        # Keep any entity tagged as a symptom or disease; fall back to the
        # raw query when the model finds none.
        found = [ent.text for ent in parsed.ents if ent.label_ in ("SYMPTOM", "DISEASE")]
        triaje_out = triaje_primer_tool({
            "sintomas": " ".join(found) if found else query,
            "intensidad": "moderada",
            "duracion": "6",
            "antecedentes": "",
        })
        respuesta = f"Puntaje: {triaje_out['score']}. {triaje_out['recomendacion']}"
    elif mode == "mistral":
        prompt = f"Evalúa: {query}. Sugiere pasos a seguir."
        respuesta = health_nlp(prompt, max_length=150)[0]["generated_text"]
        respuesta += "\n\n*Nota: Este es un asistente de IA y no sustituye el consejo de un profesional médico.*"
    else:
        respuesta = "Modo no reconocido."

    audio_response = text_to_speech(respuesta)
    return audio_response, f"Pregunta: {query}\nRespuesta: {respuesta}"
# --- Gradio UI: microphone in, synthesized audio + transcript out -----------
with gr.Blocks(title="Agente Médico Speech-to-Speech") as demo:
    gr.Markdown("# Agente Médico\nHabla y elige un modo: Asesoramiento, Triaje o Mistral.")
    with gr.Row():
        # NOTE(review): `source=` was renamed to `sources=["microphone"]` in
        # Gradio 4.x; on a 4.x runtime this line raises a TypeError at
        # startup — confirm the pinned gradio version in requirements.txt.
        audio_input = gr.Audio(source="microphone", type="filepath", label="Habla aquí")
        mode_select = gr.Radio(["asesoramiento", "triaje", "mistral"], label="Modo", value="asesoramiento")
    with gr.Row():
        audio_output = gr.Audio(label="Respuesta en audio")
        text_output = gr.Textbox(label="Transcripción y Respuesta")
    btn = gr.Button("Procesar")
    # Wire the button: process_input(audio, mode) -> (audio file, text).
    btn.click(fn=process_input, inputs=[audio_input, mode_select], outputs=[audio_output, text_output])

demo.launch()