# NOTE: removed non-Python residue pasted from the Hugging Face Spaces page
# header ("Spaces" / runtime status badges) — it was not part of the program.
| """ | |
| Aplicacion Gradio para Hugging Face Spaces | |
| Sistema RAG de ALIA Turismo con Salamandra 7B Instruct | |
| """ | |
| import gradio as gr | |
| import os | |
| from pathlib import Path | |
| import sys | |
| import torch | |
| from datetime import datetime | |
| # Configurar paths | |
| ROOT_DIR = Path(__file__).parent | |
| sys.path.insert(0, str(ROOT_DIR)) | |
| # Importar sistema RAG | |
| from rag_system import RAGLLMSystem | |
| # Inicializar sistema RAG (se carga una sola vez) | |
| print("[ALIA] Inicializando sistema RAG...") | |
| rag_system = RAGLLMSystem() | |
| print("[ALIA] Sistema listo!") | |
def format_sources(sources):
    """Render retrieved sources as a Markdown section for the chat display.

    Args:
        sources: Iterable of dicts with keys ``filename``, ``category`` and
            ``score`` (relevance in [0, 1]). May be empty or ``None``.

    Returns:
        A Markdown string with one numbered entry per source, or ``""``
        when there are no sources to show.
    """
    if not sources:
        return ""
    # Collect fragments and join once — avoids the quadratic string-+=
    # anti-pattern of the previous implementation (output is identical).
    parts = ["\n\n---\n\n### 📚 Fuentes Consultadas:\n\n"]
    for i, source in enumerate(sources, 1):
        parts.append(f"**{i}. {source['filename']}**\n")
        parts.append(f" - Categoría: {source['category']}\n")
        parts.append(f" - Relevancia: {source['score']:.2%}\n\n")
    return "".join(parts)
def query_rag(
    question,
    top_k,
    score_threshold,
    max_tokens,
    temperature,
    history
):
    """Handle one chat turn: retrieve documents, generate, update history.

    Args:
        question: User question; ``None``/empty/whitespace is ignored.
        top_k: Number of documents to retrieve (coerced to int).
        score_threshold: Minimum relevance score in [0, 1] (coerced to float).
        max_tokens: Generation length cap (coerced to int).
        temperature: Sampling temperature (coerced to float).
        history: Chatbot history as a list of [user, assistant] pairs.
            Gradio may pass ``None`` for a fresh conversation.

    Returns:
        Tuple ``(updated_history, "")`` — the empty string clears the
        input textbox component.
    """
    # Gradio hands us None before the first turn; the old code would raise
    # TypeError on `None + [...]`. Normalize (and copy) defensively.
    history = list(history or [])
    # Ignore empty or whitespace-only submissions.
    if not question or not question.strip():
        return history, ""
    # Append the user turn with a pending (None) assistant slot.
    history = history + [[question, None]]
    try:
        result = rag_system.query(
            question=question,
            top_k=int(top_k),
            score_threshold=float(score_threshold),
            max_new_tokens=int(max_tokens),
            temperature=float(temperature)
        )
        # Compose: answer, then source list, then timing metrics.
        answer = result.answer
        sources = format_sources(result.sources)
        full_response = answer + sources
        metrics = f"\n\n---\n\n⏱️ **Tiempos**: Búsqueda: {result.retrieval_time:.2f}s | Generación: {result.generation_time:.2f}s | Total: {result.total_time:.2f}s"
        full_response += metrics
        # Fill in the assistant slot of the last turn.
        history[-1][1] = full_response
    except Exception as e:
        # UI boundary: surface the failure in the chat rather than crash
        # the Space; the exception text is shown verbatim to the user.
        error_msg = f"❌ Error procesando consulta: {str(e)}"
        history[-1][1] = error_msg
    return history, ""
def create_interface():
    """Build and return the Gradio Blocks UI for the ALIA RAG assistant.

    Returns:
        The assembled ``gr.Blocks`` app (not launched here).
    """
    # Detect the compute device so the header banner can display it.
    device = "GPU" if torch.cuda.is_available() else "CPU"
    if torch.cuda.is_available():
        gpu_name = torch.cuda.get_device_name(0)
        device_info = f"🟢 {device}: {gpu_name}"
    else:
        device_info = f"🟡 {device}"
    # Custom theme for the Space.
    theme = gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="green",
    )
    with gr.Blocks(
        theme=theme,
        title="ALIA Turismo - Asistente RAG",
        css="""
        .gradio-container {max-width: 1200px !important}
        #title {text-align: center; color: #1976D2; font-size: 2.5em; font-weight: 700;}
        #subtitle {text-align: center; color: #666; font-size: 1.1em; margin-bottom: 2em;}
        #device-info {text-align: center; padding: 0.5em; background: #f0f0f0; border-radius: 8px;}
        """
    ) as demo:
        # Header: title + subtitle.
        gr.Markdown(
            """
            <div id="title">🏛️ ALIA Turismo</div>
            <div id="subtitle">Asistente de Planes Estratégicos de Turismo</div>
            """,
            elem_id="header"
        )
        # Status banner with the detected device.
        gr.Markdown(
            f"""
            <div id="device-info">{device_info} | Salamandra 7B Instruct | 499 Documentos Indexados</div>
            """
        )
        with gr.Row():
            with gr.Column(scale=3):
                # Chat transcript area.
                chatbot = gr.Chatbot(
                    label="Conversación",
                    height=500,
                    show_label=False,
                    bubble_full_width=False
                )
                with gr.Row():
                    with gr.Column(scale=9):
                        question_input = gr.Textbox(
                            placeholder="Escribe tu pregunta sobre planes turísticos...",
                            show_label=False,
                            container=False
                        )
                    with gr.Column(scale=1, min_width=80):
                        submit_btn = gr.Button("Enviar", variant="primary", size="sm")
                # Clickable example questions that fill the textbox.
                gr.Examples(
                    examples=[
                        "¿Cuáles son las principales estrategias de turismo sostenible?",
                        "¿Cómo se implementa la gobernanza en destinos turísticos inteligentes?",
                        "¿Qué indicadores se usan para medir el éxito de los planes turísticos?",
                        "¿Cuáles son las mejores prácticas de marketing digital para destinos?",
                    ],
                    inputs=question_input,
                    label="💡 Preguntas Sugeridas"
                )
                clear_btn = gr.Button("🔄 Nueva Conversación", size="sm")
            with gr.Column(scale=1):
                # Settings panel: retrieval and generation knobs wired as
                # extra inputs to the query handler.
                gr.Markdown("### ⚙️ Configuración")
                top_k = gr.Slider(
                    minimum=1,
                    maximum=10,
                    value=5,
                    step=1,
                    label="Documentos a recuperar",
                    info="Cuántos documentos consultar"
                )
                score_threshold = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.6,
                    step=0.05,
                    label="Umbral de relevancia",
                    info="Puntuación mínima (0-1)"
                )
                max_tokens = gr.Slider(
                    minimum=256,
                    maximum=2048,
                    value=1024,
                    step=256,
                    label="Tokens máximos",
                    info="Longitud de respuesta"
                )
                temperature = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                    info="Creatividad (0=conservador, 1=creativo)"
                )
                gr.Markdown("---")
                # Collapsible "about" info box.
                with gr.Accordion("ℹ️ Acerca de", open=False):
                    gr.Markdown(
                        """
                        **ALIA Turismo** - Asistente inteligente para consultas sobre
                        planes estratégicos de turismo.
                        **Modelo:** Salamandra 7B Instruct (BSC)
                        **Documentos:** 499 planes turísticos
                        **Idioma:** Español/Catalán
                        **Versión:** 1.1.0
                        **Licencia:** Apache 2.0
                        Desarrollado por Barcelona Supercomputing Center
                        """
                    )
        # Event wiring: both the button click and pressing Enter in the
        # textbox run the RAG query; outputs update the chat and clear the
        # textbox. Queued so long generations don't block the UI worker.
        submit_event = submit_btn.click(
            fn=query_rag,
            inputs=[question_input, top_k, score_threshold, max_tokens, temperature, chatbot],
            outputs=[chatbot, question_input],
            queue=True
        )
        question_input.submit(
            fn=query_rag,
            inputs=[question_input, top_k, score_threshold, max_tokens, temperature, chatbot],
            outputs=[chatbot, question_input],
            queue=True
        )
        # Reset chat and input immediately (no queue needed).
        clear_btn.click(
            fn=lambda: ([], ""),
            outputs=[chatbot, question_input],
            queue=False
        )
    return demo
| # Crear y lanzar interface | |
| if __name__ == "__main__": | |
| demo = create_interface() | |
| demo.queue(max_size=10) | |
| demo.launch( | |
| server_name="0.0.0.0", | |
| server_port=7860, | |
| share=False | |
| ) | |