Update app.py
app.py
CHANGED
@@ -55,7 +55,7 @@ PERSONAJES_POR_PAIS = {
         {"nombre": "El Ekeko", "imagen": "images/bo2.jpg", "descripcion": "Dios aymara de la abundancia y la fortuna con jorobas"},
         {"nombre": "El Jichi", "imagen": "images/bo3.webp", "descripcion": "Serpiente protectora de ríos y lagunas en la cultura andina"}
     ],
-
+    "🇧🇷 Brasil": [
         {"nombre": "Curupira", "imagen": "images/br1.jpeg", "descripcion": "Protector del bosque amazónico con pies al revés"},
         {"nombre": "Saci-Pererê", "imagen": "images/br2.jpg", "descripcion": "Duende travieso de una pierna que fuma pipa"},
         {"nombre": "Yebá Bëló", "imagen": "images/br3.jpg", "descripcion": "Abuela del mundo en mitología desana, creadora del universo"}
@@ -204,35 +204,8 @@ def load_model():
 
 model_loaded = load_model()
 
-def format_chat_history(messages: list, exclude_last_user: bool = True) -> list:
-    """Formatea el historial de chat para el modelo"""
-    formatted_history = []
-    messages_to_process = messages[:]
-    if exclude_last_user and messages_to_process and messages_to_process[-1].get("role") == "user":
-        messages_to_process = messages_to_process[:-1]
-
-    for message in messages_to_process:
-        current_role = message.get("role")
-        current_content = message.get("content", "").strip()
-
-        if current_role == "assistant" and message.get("metadata"):
-            continue
-        if not current_content:
-            continue
-
-        if formatted_history and formatted_history[-1]["role"] == current_role:
-            formatted_history[-1]["content"] += "\n\n" + current_content
-        else:
-            formatted_history.append({
-                "role": current_role,
-                "content": current_content
-            })
-
-    return formatted_history
-
 def stream_iberotales_response(
-    user_message: str,
-    messages: list,
+    user_message: str,
     system_message: str = DEFAULT_SYSTEM_MESSAGE,
     max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
     temperature: float = 0.7,
@@ -240,29 +213,20 @@ def stream_iberotales_response(
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[list]:
-    """Genera respuesta con streaming"""
+    """Genera respuesta con streaming - sin historial"""
     global model, tokenizer
 
     if model is None or tokenizer is None:
-
-        yield messages
+        yield [ChatMessage(role="assistant", content="Error: Modelo no disponible.")]
         return
 
     try:
-
+        # Crear conversación simple sin historial
        conversation = []
         if system_message.strip():
             conversation.append({"role": "system", "content": system_message.strip()})
-        conversation.extend(chat_history)
         conversation.append({"role": "user", "content": user_message})
 
-        # Validar alternancia
-        for i in range(1, len(conversation)):
-            if conversation[i]["role"] == conversation[i-1]["role"] and conversation[i-1]["role"] != "system":
-                messages.append(ChatMessage(role="assistant", content="Error: Reinicia la conversación."))
-                yield messages
-                return
-
         prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
         response = model(
             prompt,
@@ -275,6 +239,7 @@ def stream_iberotales_response(
         )
 
         full_response = ""
+        messages = []
         thinking_message_index = None
         solution_message_index = None
         in_think_block = False
@@ -352,15 +317,8 @@ def stream_iberotales_response(
             yield messages
 
     except Exception as e:
-
-        yield messages
-
-def user_message(msg: str, history: list) -> tuple[str, list]:
-    """Añade mensaje del usuario al historial"""
-    history.append(ChatMessage(role="user", content=msg))
-    return "", history
+        yield [ChatMessage(role="assistant", content=f"Error: {str(e)}")]
 
-# 1. CAMBIO EN LA FUNCIÓN actualizar_personajes:
 def actualizar_personajes(pais_seleccionado):
     """Actualiza la galería de personajes según el país seleccionado"""
     global current_personajes
@@ -370,11 +328,11 @@ def actualizar_personajes(pais_seleccionado):
     if not personajes:
         return [], "Selecciona un país para ver sus personajes"
 
-    # Crear lista de imágenes
+    # Crear lista de imágenes para la galería
     imagenes = []
     for p in personajes:
         if os.path.exists(p["imagen"]):
-            imagenes.append(p["imagen"])
+            imagenes.append(p["imagen"])
         else:
             # Imagen placeholder si no existe
             imagenes.append("https://via.placeholder.com/100x100.png?text=No+Image")
@@ -388,17 +346,13 @@ def crear_prompt_desde_personaje(evt: gr.SelectData):
     try:
         if evt.index is not None and evt.index < len(current_personajes):
             personaje = current_personajes[evt.index]
-            return f"Crea una historia sobre {personaje['nombre']}, {personaje['descripcion']}"
+            return f"Crea una historia sobre {personaje['nombre']}, {personaje['descripcion']}"
         else:
             return "Crea una historia sobre un personaje mítico"
     except Exception as e:
         print(f"Error al crear prompt: {e}")
         return "Crea una historia sobre un personaje mítico"
 
-# Aplicar decorador @spaces.GPU si es necesario
-# if IS_HF_SPACE and SPACES_AVAILABLE and torch.cuda.is_available():
-#     stream_iberotales_response = spaces.GPU(stream_iberotales_response)
-
 # CSS personalizado para mejorar la apariencia
 custom_css = """
 .gradio-container {
@@ -472,12 +426,8 @@ with gr.Blocks(fill_height=True, title="Iberotales", css=custom_css) as demo:
                 elem_id="galeria",
                 columns=1,
                 rows=4,
-                height=350
-                # REMOVER ESTAS LÍNEAS QUE CAUSAN EL ERROR:
-                # object_fit="cover",
-                # preview=False
+                height=350
             )
-
 
             # Panel derecho - Chat
             with gr.Column(scale=2):
@@ -505,30 +455,32 @@ with gr.Blocks(fill_height=True, title="Iberotales", css=custom_css) as demo:
                     max_tokens = gr.Slider(100, MAX_MAX_NEW_TOKENS, DEFAULT_MAX_NEW_TOKENS, label="Tokens", container=False)
                     temperature = gr.Slider(0.1, 2.0, 0.7, label="Temp", container=False)
 
-    # Variables de estado
-    msg_store = gr.State("")
-
     # Eventos
-    def
+    def generate_and_display(msg, max_tok, temp):
+        """Genera respuesta y la muestra en el chatbot"""
         if not msg.strip():
-            return
-
-
-
-            yield
+            return []
+
+        # Agregar mensaje del usuario
+        messages = [ChatMessage(role="user", content=msg)]
+        yield messages
+
+        # Generar respuesta
+        for response_messages in stream_iberotales_response(msg, DEFAULT_SYSTEM_MESSAGE, max_tok, temp):
+            yield [ChatMessage(role="user", content=msg)] + response_messages
 
     # Actualizar personajes cuando cambia el país
     pais_dropdown.change(
         fn=actualizar_personajes,
         inputs=[pais_dropdown],
-        outputs=[galeria_personajes]
+        outputs=[galeria_personajes]
     )
 
     # Cargar personajes iniciales
     demo.load(
         fn=actualizar_personajes,
        inputs=[pais_dropdown],
-        outputs=[galeria_personajes]
+        outputs=[galeria_personajes]
     )
 
     # Crear prompt desde galería
@@ -539,30 +491,26 @@ with gr.Blocks(fill_height=True, title="Iberotales", css=custom_css) as demo:
 
     # Envío de mensajes
     input_box.submit(
-
-        inputs=[input_box,
-        outputs=[msg_store, chatbot],
-        queue=False
-    ).then(
-        generate_response,
-        inputs=[msg_store, chatbot, max_tokens, temperature],
+        generate_and_display,
+        inputs=[input_box, max_tokens, temperature],
         outputs=chatbot
+    ).then(
+        lambda: "",
+        outputs=input_box
     )
 
     send_button.click(
-
-        inputs=[input_box,
-        outputs=[msg_store, chatbot],
-        queue=False
-    ).then(
-        generate_response,
-        inputs=[msg_store, chatbot, max_tokens, temperature],
+        generate_and_display,
+        inputs=[input_box, max_tokens, temperature],
         outputs=chatbot
+    ).then(
+        lambda: "",
+        outputs=input_box
     )
 
     clear_button.click(
-        lambda: ([], ""
-        outputs=[chatbot, input_box
+        lambda: ([], ""),
+        outputs=[chatbot, input_box],
         queue=False
     )
 
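For reference, the refactored stream_iberotales_response builds a fresh single-turn conversation on every call instead of replaying chat history. A minimal sketch of that prompt construction, assuming a transformers tokenizer whose checkpoint ships a chat template; the model name, system message, and user message below are illustrative stand-ins, not values from app.py:

from transformers import AutoTokenizer

# Illustrative checkpoint; app.py loads its own model/tokenizer in load_model()
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

system_message = "Eres un narrador de mitos iberoamericanos."  # stand-in for DEFAULT_SYSTEM_MESSAGE
user_message = "Crea una historia sobre Curupira"              # stand-in user prompt

# Single-turn conversation: optional system prompt plus the current user message only
conversation = []
if system_message.strip():
    conversation.append({"role": "system", "content": system_message.strip()})
conversation.append({"role": "user", "content": user_message})

# Render to one prompt string; add_generation_prompt appends the assistant header
prompt = tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
print(prompt)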
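The event wiring now streams straight from a generator and clears the textbox in a chained .then() step, replacing the old msg_store/gr.State round-trip. A runnable sketch of the same pattern, assuming Gradio 4.x+ with gr.ChatMessage; the echoed reply stands in for the real stream_iberotales_response call:

import gradio as gr
from gradio import ChatMessage

def generate_and_display(msg: str, max_tok: int, temp: float):
    """Yield the user turn first, then the (stubbed) assistant turn."""
    if not msg.strip():
        yield []
        return
    messages = [ChatMessage(role="user", content=msg)]
    yield messages  # show the user message immediately
    # Stand-in for stream_iberotales_response(msg, DEFAULT_SYSTEM_MESSAGE, max_tok, temp)
    messages.append(ChatMessage(role="assistant", content=f"(eco) {msg}"))
    yield messages

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    input_box = gr.Textbox(label="Mensaje")
    max_tokens = gr.Slider(100, 2048, 512, label="Tokens")
    temperature = gr.Slider(0.1, 2.0, 0.7, label="Temp")
    # Stream into the chatbot, then clear the textbox in a chained step
    input_box.submit(
        generate_and_display,
        inputs=[input_box, max_tokens, temperature],
        outputs=chatbot,
    ).then(lambda: "", outputs=input_box)

demo.launch()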