|
|
import gradio as gr |
|
|
from transformers import pipeline |
|
|
from diffusers import DiffusionPipeline |
|
|
import torch |
|
|
import time |
|
|
from tqdm import trange |
|
|
|
|
|
|
|
|
# Pick the accelerator: prefer CUDA when available, otherwise run on CPU.
if torch.cuda.is_available():
    device, torch_dtype = "cuda", torch.float16  # half precision saves VRAM on GPU
else:
    device, torch_dtype = "cpu", torch.float32  # fp16 is poorly supported on CPU
|
|
|
|
|
|
|
|
# Image generator: SSD-1B (a distilled SDXL variant), loaded in the dtype
# chosen above and moved to the selected device.
image_pipe = DiffusionPipeline.from_pretrained(
    "segmind/SSD-1B", torch_dtype=torch_dtype
)
image_pipe = image_pipe.to(device)
|
|
|
|
|
|
|
|
# Text generator: Spanish GPT-2 (base) trained on the BNE corpus.
# transformers expects a GPU index for `device`, or -1 to stay on CPU.
text_pipe = pipeline(
    task="text-generation",
    model="PlanTL-GOB-ES/gpt2-base-bne",
    device=(0 if device == "cuda" else -1),
)
|
|
|
|
|
|
|
|
def crear_microcuento(tema, progress=gr.Progress(track_tqdm=True)):
    """Generate an illustration and a short Spanish micro-story for a theme.

    Parameters
    ----------
    tema : str
        Theme typed by the user in the UI.
    progress : gr.Progress
        Gradio progress tracker; ``track_tqdm=True`` mirrors the ``trange``
        loops below (and any tqdm bars the pipelines emit) in the web UI.

    Returns
    -------
    tuple
        ``(imagen, cuento)`` — the generated PIL image and the story text
        with surrounding whitespace stripped.
    """
    prompt_img = f"Ilustración digital detallada sobre: {tema}, arte suave, poético, luces tenues"

    # NOTE(review): these trange loops are cosmetic — they only drive the UI
    # progress bar; the actual diffusion call below is not step-tracked.
    for _ in trange(50, desc="Generando imagen..."):
        time.sleep(0.05)

    imagen = image_pipe(prompt_img).images[0]

    for _ in trange(50, desc="Generando microcuento..."):
        time.sleep(0.03)

    prompt_txt = f"Escribe un microcuento narrativo, creativo y poético en español sobre: '{tema}'. El cuento debe tener inicio, desarrollo y final en no más de 4 líneas."

    # return_full_text=False makes the pipeline return only the continuation.
    # This is more robust than stripping the prompt with str.replace(): the
    # decoded output can differ slightly from the original prompt string,
    # which would leave the prompt embedded in the story.
    cuento = text_pipe(
        prompt_txt,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.9,
        return_full_text=False,
    )[0]["generated_text"]

    return imagen, cuento.strip()
|
|
|
|
|
|
|
|
# Wire the generator into a simple web UI: one textbox in, image + story out.
demo = gr.Interface(
    fn=crear_microcuento,
    inputs=gr.Textbox(
        lines=2,
        placeholder="Ej: La lluvia que susurra secretos",
        label="Tema del microcuento",
    ),
    outputs=[
        gr.Image(label="Imagen generada"),
        gr.Textbox(label="Microcuento"),
    ],
    title="✨ Microcuentos Ilustrados por IA",
    description="Ingresa un tema y esta IA creará una imagen artística y un microcuento narrativo inspirado en él.",
)
demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|