import random

import gradio as gr
import numpy as np
import torch
from PIL import Image
from diffusers import DiffusionPipeline, StableDiffusionImg2ImgPipeline
from transformers import CLIPTokenizer

# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.bfloat16 if device.type == "cuda" else torch.float32

# Load the pipelines for the two tasks: text-to-image (FLUX.1 [schnell])
# and image-to-image (Stable Diffusion v1.5)
pipe_diffusion = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
).to(device)
pipe_img2img = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
).to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

# CLIP tokenizer, used to truncate overly long prompts to the 77-token limit
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")


def truncate_prompt(prompt, max_length=77):
    """Truncate a prompt to at most `max_length` CLIP tokens."""
    input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"][0][:max_length]
    return tokenizer.decode(input_ids, skip_special_tokens=True)


def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024,
          num_inference_steps=4, init_image=None, img2img_strength=0.75,
          progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    # Truncate the prompt if it is too long
    prompt = truncate_prompt(prompt)

    # Convert init_image to a PIL image if it is not one already
    if init_image is not None and not isinstance(init_image, Image.Image):
        init_image = Image.fromarray(np.array(init_image))

    # Free cached CUDA memory before generation
    if device.type == "cuda":
        torch.cuda.empty_cache()

    try:
        if init_image is not None:
            # img2img: transform the provided image, guided by the prompt
            init_image = init_image.convert("RGB")
            generated_image = pipe_img2img(
                prompt=prompt,
                image=init_image,
                strength=img2img_strength,
                num_inference_steps=num_inference_steps,
                generator=generator,
            ).images[0]
        else:
            # text-to-image: FLUX.1 [schnell] is a distilled model, so it
            # runs in few steps and without classifier-free guidance
            generated_image = pipe_diffusion(
                prompt=prompt,
                width=width,
                height=height,
                num_inference_steps=num_inference_steps,
                generator=generator,
                guidance_scale=0.0,
            ).images[0]
    except RuntimeError as e:
        if "out of memory" in str(e):
            print("CUDA memory error:", e)
            torch.cuda.empty_cache()
            return None, seed
        raise

    # Return the generated image along with the seed that produced it
    return generated_image, seed


examples = [
    "a tiny astronaut hatching from an egg on the moon",
    "a cat holding a sign that says hello world",
    "an anime illustration of a wiener schnitzel",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("""# FLUX.1 [schnell] + Stable Diffusion img2img
Combined image generation and image-to-image transformation with img2img.
""")
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            width = gr.Slider(
                label="Width",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=50,
                step=1,
                value=4,
            )
            init_image = gr.Image(type="pil", label="Initial image (optional)")
            img2img_strength = gr.Slider(
                label="Img2Img Strength",
                minimum=0.0,
                maximum=1.0,
                step=0.05,
                value=0.75,
            )

        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt],
            outputs=[result, seed],
            cache_examples="lazy",
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height,
                num_inference_steps, init_image, img2img_strength],
        outputs=[result, seed],
    )

demo.launch()