import gradio as gr
from PIL import Image
import numpy as np
import torch
from transformers import Qwen2_5_VLForConditionalGeneration, AutoProcessor
from qwen_vl_utils import process_vision_info
from peft import PeftModel
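# Dependencies (a suggested set, inferred from the imports above):
#   pip install gradio pillow numpy torch transformers peft accelerate qwen-vl-utils
# accelerate is assumed to be required for device_map="auto"; flash-attn is
# optional and only used when a GPU is available (see attn_implementation below).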
# The system prompt is kept verbatim (mixed English/Spanish): it is assumed to
# match the prompt used when fine-tuning the adapter, so changing it could
# degrade the model's <think>/<answer> formatting.
system_prompt = (
    "A conversation between User and Assistant. The user asks a question, and the Assistant solves it. "
    "The assistant es un experto sobre Colombia. Primero razona en mente y luego da la respuesta. "
    "El razonamiento y la respuesta van en <think></think> y <answer></answer>."
)
MODEL_ID = 'Qwen/Qwen2.5-VL-3B-Instruct'
ADAPTER_ID = 'Factral/qwen2.5vl-3b-colombia-finetuned'

processor = AutoProcessor.from_pretrained(MODEL_ID)
# Load the base model
has_gpu = torch.cuda.is_available()
# flash_attention_2 requires the flash-attn package; fall back to eager on CPU
attn_impl = "flash_attention_2" if has_gpu else "eager"
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.bfloat16 if has_gpu else torch.float32,  # full precision on CPU
    attn_implementation=attn_impl,
    device_map="auto"
)
# Load the PEFT adapter and merge it into the base weights
model = PeftModel.from_pretrained(model, ADAPTER_ID)
model = model.merge_and_unload()
model.eval()

device = torch.device("cuda" if has_gpu else "cpu")
model.to(device)
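# Optional: on small GPUs the base model could instead be loaded in 4-bit with
# bitsandbytes (a sketch, not used here; note that merging a LoRA adapter into
# quantized weights is typically not supported, so the adapter would stay separate):
# from transformers import BitsAndBytesConfig
# bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
# model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
#     MODEL_ID, quantization_config=bnb_config, device_map="auto"
# )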
example_imgs = [
    ("6.png", "Shakira"),
    ("163.png", "Corner store"),
    ("img_71_2.png", "Colombian food"),
    ("img_98.png", "Spectacled bear"),
]
def cargar_imagen(imagen_path: str) -> Image.Image:
    return Image.open(imagen_path)
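# The system prompt asks the model to wrap its reasoning in <think></think> and
# its final answer in <answer></answer>. This hypothetical helper (not in the
# original app) extracts just the answer, falling back to the raw text when the
# tags are missing. Optional usage: respuesta = extract_answer(raw_output)
import re

def extract_answer(text: str) -> str:
    match = re.search(r"<answer>(.*?)</answer>", text, re.DOTALL)
    return match.group(1).strip() if match else text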
# CSS for the scrollable gallery; passed to the Blocks constructor because
# assigning demo.css after construction is unreliable across Gradio versions
custom_css = """
#galeria-scroll {
    max-height: 320px;
    overflow-y: auto;
    border: 1px solid #ccc;
    padding: 8px;
    border-radius: 8px;
}
"""

with gr.Blocks(theme='lone17/kotaemon', css=custom_css) as demo:
    gr.Markdown(
        """
        <h1>🇨🇴
        <span style='color:gold;'>Bacan</span><span style='color:blue;'>oResp</span><span style='color:red;'>onder</span>
        </h1>
        <p>Upload or pick an image, ask a question, and get an answer with local context.</p>
        """
    )
    with gr.Row():
        # Left column
        with gr.Column(scale=1):
            gr.Markdown(
                """
                #### 📌 Project motivation
                The goal of **BacanoResponder** is to let users in Colombia interact with images of their surroundings and receive contextualized information.
                <br/>
                #### 🌟 Impact
                We provide specific answers about Colombian objects, places, and customs, benefiting students, tourists, and anyone interested in our traditions.
                #### 👥 Team
                • Fabian Perez
                • Henry Mantilla
                • Andrea Parra
                • Juan Calderón
                • Research group we belong to: [SemilleroCV](https://semillerocv.github.io/)
                """
            )
        # Right column
        with gr.Column(scale=1):
            gr.Markdown(
                """
                #### 🚀 Future ideas
                - 📈 Significantly scale up the dataset
                - 🎤 Add voice questions in regional dialects
                - 🌐 Automatic translation for international users
                - 🗺️ More dialects and customs (Amazon, Caribbean, etc.)
                - 🔄 Community feedback for continuous fine-tuning
                - 🗺️ Maps with coordinates and tourist routes
                #### 🤖 Models used
                - *Qwen2.5-VL-3B-Instruct*
                - Dataset: [QuestionAnswer-ImgsColombia](https://huggingface.co/datasets/4nd/QuestionAnswer-ImgsColombia)
                """
            )
    with gr.Row(equal_height=True):
        # Left column
        with gr.Column(scale=1):
            pregunta = gr.Textbox(
                label="❓ Question about your image",
                placeholder="What does this image show?",
                lines=2
            )
            # elem_id ties the Gallery to the scrolling CSS defined above
            galeria = gr.Gallery(
                label="📁 Pick an example image",
                value=[img for img, _ in example_imgs],
                columns=2,
                height=None,  # height is controlled by the CSS
                allow_preview=True,
                show_label=True,
                elem_id="galeria-scroll"
            )
        # Right column
        with gr.Column(scale=1):
            imagen_mostrada = gr.Image(
                label="🖼 Selected or uploaded image",
                type="numpy",
                height=256
            )
            respuesta = gr.Textbox(
                label="🧠 Answer",
                interactive=False,
                lines=4
            )
            btn_procesar = gr.Button("🔍 Process")
    def seleccionar_imagen(evt: gr.SelectData):
        idx = evt.index
        img_path = example_imgs[idx][0]
        pil = cargar_imagen(img_path)
        return np.array(pil)

    galeria.select(fn=seleccionar_imagen, inputs=None, outputs=imagen_mostrada)
    def responder(img, pregunta_text):
        if img is None or pregunta_text.strip() == "":
            return "Please upload an image and type a question."
        # Convert the numpy array to PIL if needed
        if isinstance(img, np.ndarray):
            img_pil = Image.fromarray(img.astype('uint8'))
        else:
            img_pil = img  # already a PIL image
        messages = [
            {
                "role": "system",
                "content": [{"type": "text", "text": system_prompt}],
            },
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": pregunta_text},
                    {"type": "image", "image": img_pil},
                ],
            },
        ]
        text = processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        # process_vision_info extracts the image (and video) inputs referenced
        # in the chat messages, in the format the Qwen2.5-VL processor expects
        image_inputs, video_inputs = process_vision_info(messages)
        inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        inputs = inputs.to(device)
        with torch.no_grad():
            generated_ids = model.generate(
                **inputs,
                max_new_tokens=512,
                top_p=1.0,
                do_sample=True,
                temperature=0.9
            )
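        # For reproducible answers, sampling could be disabled instead
        # (a sketch; greedy decoding trades varied phrasing for determinism):
        # generated_ids = model.generate(**inputs, max_new_tokens=512, do_sample=False)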
        # Strip the prompt tokens so only the newly generated text is decoded
        trimmed = [
            out_ids[len(in_ids):]
            for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        respuesta_text = processor.batch_decode(
            trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False
        )
        return respuesta_text[0]
    btn_procesar.click(fn=responder, inputs=[imagen_mostrada, pregunta], outputs=respuesta)
if __name__ == "__main__":
    demo.launch()