import spaces
import gradio as gr
import torch
import nltk
import numpy as np

from PIL import Image, ImageDraw
from diffusers import DDIMScheduler
from pipeline_stable_diffusion_xl_opt import StableDiffusionXLPipeline
# injection_utils exports the (misspelled) name "regiter_attention_editor_diffusers";
# alias it so the call site below can use the correctly spelled name.
from injection_utils import regiter_attention_editor_diffusers as register_attention_editor_diffusers
from bounded_attention import BoundedAttention
from pytorch_lightning import seed_everything

MODEL_PATH = "stabilityai/stable-diffusion-xl-base-1.0"
RESOLUTION = 256
MIN_SIZE = 0.01
WHITE = 255
COLORS = ["red", "blue", "green", "orange", "purple", "turquoise", "olive"]

CSS = """
#paper-info a {
    color: #008AD7;
    text-decoration: none;
}

#paper-info a:hover {
    cursor: pointer;
    text-decoration: none;
}

.tooltip {
    color: #555;
    position: relative;
    display: inline-block;
    cursor: pointer;
}

.tooltip .tooltiptext {
    visibility: hidden;
    width: 400px;
    background-color: #555;
    color: #fff;
    text-align: center;
    padding: 5px;
    border-radius: 5px;
    position: absolute;
    z-index: 1;
    left: 10px;
    top: 100%;
    opacity: 0;
    transition: opacity 0.3s;
}

.tooltip:hover .tooltiptext {
    visibility: visible;
    opacity: 1;
    z-index: 9999;  /* raise above other elements while visible */
}
"""

DESCRIPTION = """
<p style="text-align: center; font-weight: bold;">
    <span style="font-size: 28px">Bounded Attention</span>
    <br>
    <span style="font-size: 18px" id="paper-info">
        [<a href="https://omer11a.github.io/bounded-attention/" target="_blank">Project Page</a>]
        [<a href="https://arxiv.org/abs/2403.16990" target="_blank">Paper</a>]
        [<a href="https://github.com/omer11a/bounded-attention" target="_blank">GitHub</a>]
    </span>
</p>
"""

COPY_LINK = """
<a href="https://huggingface.co/spaces/omer11a/bounded-attention?duplicate=true">
    <img src="https://bit.ly/3gLdBN6" alt="Duplicate Space">
</a>
Duplicate this Space to generate more samples without waiting in the queue.
"""

ADVANCED_OPTION_DESCRIPTION = """
<div class="tooltip">Number of guidance steps ⓘ
    <span class="tooltiptext">The number of timesteps in which to perform guidance. The recommended value is 15, but increasing it also increases the runtime.</span>
</div>
<div class="tooltip">Batch size ⓘ
    <span class="tooltiptext">The number of images to generate.</span>
</div>
<div class="tooltip">Initial step size ⓘ
    <span class="tooltiptext">The initial step size of the linear step-size scheduler used during guidance.</span>
</div>
<div class="tooltip">Final step size ⓘ
    <span class="tooltiptext">The final step size of the linear step-size scheduler used during guidance.</span>
</div>
<div class="tooltip">Number of self-attention clusters per subject ⓘ
    <span class="tooltiptext">Determines the number of clusters when clustering the self-attention maps (#clusters = #subjects x #clusters_per_subject). Changing this value might improve semantics (adherence to the prompt), especially when the subjects exceed their bounding boxes.</span>
</div>
<div class="tooltip">Cross-attention loss scale factor ⓘ
    <span class="tooltiptext">The scale factor of the cross-attention loss term. Increasing it improves semantic control (adherence to the prompt), but may reduce image quality.</span>
</div>
<div class="tooltip">Self-attention loss scale factor ⓘ
    <span class="tooltiptext">The scale factor of the self-attention loss term. Increasing it improves layout control (adherence to the bounding boxes), but may reduce image quality.</span>
</div>
<div class="tooltip">Number of gradient descent iterations per timestep ⓘ
    <span class="tooltiptext">The number of gradient descent iterations performed at each timestep during guidance.</span>
</div>
<div class="tooltip">Loss threshold ⓘ
    <span class="tooltiptext">If the loss falls below this threshold, gradient descent stops for that timestep.</span>
</div>
<div class="tooltip">Classifier-free guidance scale ⓘ
    <span class="tooltiptext">The scale factor of classifier-free guidance.</span>
</div>
"""

FOOTNOTE = """
<p>The source code of this demo is based on the <a href="https://huggingface.co/spaces/gligen/demo/tree/main">GLIGEN demo</a>.</p>
"""
def inference(
    boxes,
    prompts,
    subject_token_indices,
    filter_token_indices,
    num_tokens,
    init_step_size,
    final_step_size,
    num_clusters_per_subject,
    cross_loss_scale,
    self_loss_scale,
    classifier_free_guidance_scale,
    num_iterations,
    loss_threshold,
    num_guidance_steps,
    seed,
):
    if not torch.cuda.is_available():
        raise gr.Error("CUDA is not available")

    device = torch.device("cuda")
    scheduler = DDIMScheduler(
        beta_start=0.00085,
        beta_end=0.012,
        beta_schedule="scaled_linear",
        clip_sample=False,
        set_alpha_to_one=False,
    )
    model = StableDiffusionXLPipeline.from_pretrained(MODEL_PATH, scheduler=scheduler, torch_dtype=torch.float16).to(device)
    model.unet.set_default_attn_processor()
    model.enable_sequential_cpu_offload()

    seed_everything(seed)
    start_code = torch.randn([len(prompts), 4, 128, 128], device=device)
    eos_token_index = None if num_tokens is None else num_tokens + 1
    editor = BoundedAttention(
        boxes,
        prompts,
        subject_token_indices,
        # The two layer ranges are passed positionally; in the BoundedAttention signature they
        # select which cross- and self-attention layers the editor operates on.
        list(range(70, 82)),
        list(range(70, 82)),
        filter_token_indices=filter_token_indices,
        eos_token_index=eos_token_index,
        cross_loss_coef=cross_loss_scale,
        self_loss_coef=self_loss_scale,
        max_guidance_iter=num_guidance_steps,
        max_guidance_iter_per_step=num_iterations,
        start_step_size=init_step_size,
        end_step_size=final_step_size,
        loss_stopping_value=loss_threshold,
        num_clusters_per_box=num_clusters_per_subject,
    )

    register_attention_editor_diffusers(model, editor)
    return model(prompts, latents=start_code, guidance_scale=classifier_free_guidance_scale).images
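

# Gradio callback behind the "Generate image" button: parses the token-index text fields,
# validates that the number of drawn boxes matches the number of subjects, replicates the
# prompt for the requested batch size, and dispatches to inference().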
def generate(
    prompt,
    subject_token_indices,
    filter_token_indices,
    num_tokens,
    init_step_size,
    final_step_size,
    num_clusters_per_subject,
    cross_loss_scale,
    self_loss_scale,
    classifier_free_guidance_scale,
    batch_size,
    num_iterations,
    loss_threshold,
    num_guidance_steps,
    seed,
    boxes,
):
    subject_token_indices = convert_token_indices(subject_token_indices, nested=True)
    if len(boxes) != len(subject_token_indices):
        raise gr.Error("""
            The number of boxes should be equal to the number of subjects.
            Number of boxes drawn: {}, number of subjects: {}.
        """.format(len(boxes), len(subject_token_indices)))

    filter_token_indices = convert_token_indices(filter_token_indices) if len(filter_token_indices.strip()) > 0 else None
    num_tokens = int(num_tokens) if len(num_tokens.strip()) > 0 else None
    prompts = [prompt.strip(".").strip(",").strip()] * batch_size

    images = inference(
        boxes, prompts, subject_token_indices, filter_token_indices, num_tokens, init_step_size,
        final_step_size, num_clusters_per_subject, cross_loss_scale, self_loss_scale, classifier_free_guidance_scale,
        num_iterations, loss_threshold, num_guidance_steps, seed)

    return images
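

# Parses a comma-separated list of token indices ("2,3") into a list of ints; with nested=True,
# parses a semicolon-separated list of such lists ("2,3;6,7") into a list of lists.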
def convert_token_indices(token_indices, nested=False):
    if nested:
        return [convert_token_indices(indices, nested=False) for indices in token_indices.split(";")]

    return [int(index.strip()) for index in token_indices.split(",") if len(index.strip()) > 0]
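

# Converts the sketchpad layers into normalized bounding boxes: each layer's nonzero pixels
# define one box (x1, y1, x2, y2) in [0, 1] coordinates, and a preview image is rendered.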
def draw(sketchpad):
    boxes = []
    for i, layer in enumerate(sketchpad["layers"]):
        non_zeros = layer.nonzero()
        x1 = x2 = y1 = y2 = 0
        if len(non_zeros[0]) > 0:
            x1x2 = non_zeros[1] / layer.shape[1]
            y1y2 = non_zeros[0] / layer.shape[0]
            x1 = x1x2.min()
            x2 = x1x2.max()
            y1 = y1y2.min()
            y2 = y1y2.max()

        if (x2 - x1 < MIN_SIZE) or (y2 - y1 < MIN_SIZE):
            raise gr.Error(f"Box in layer {i} is too small")

        boxes.append((x1, y1, x2, y2))

    layout_image = draw_boxes(boxes)
    return [boxes, layout_image]
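

# Renders the normalized boxes as colored rectangles on a white RESOLUTION x RESOLUTION canvas,
# one color per subject (black outlines when is_sketch=True).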
def draw_boxes(boxes, is_sketch=False):
    if len(boxes) == 0:
        return None

    boxes = np.array(boxes) * RESOLUTION
    image = Image.new("RGB", (RESOLUTION, RESOLUTION), (WHITE, WHITE, WHITE))
    drawing = ImageDraw.Draw(image)
    for i, box in enumerate(boxes.astype(int).tolist()):
        color = "black" if is_sketch else COLORS[i % len(COLORS)]
        drawing.rectangle(box, outline=color, width=4)

    return image


def clear(batch_size):
    # batch_size is accepted only because the Clear button passes it as an input; it is unused.
    return [[], None, None, None]
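

# Builds and launches the Gradio UI. The NLTK POS tagger is downloaded up front, presumably so
# that BoundedAttention can infer the filter tokens automatically when none are provided.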
def main():
    nltk.download("averaged_perceptron_tagger")

    with gr.Blocks(
        css=CSS,
        title="Bounded Attention demo",
    ) as demo:
        gr.HTML(DESCRIPTION)
        gr.HTML(COPY_LINK)

        with gr.Column():
            gr.HTML("Scroll down to see examples of the required input format.")

            prompt = gr.Textbox(
                label="Text prompt",
            )

            subject_token_indices = gr.Textbox(
                label="The token indices of each subject (separate indices for the same subject with commas, and for different subjects with semicolons)",
            )

            filter_token_indices = gr.Textbox(
                label="Optional: The token indices to filter, i.e. conjunctions, numbers, positional relations, etc. (if left empty, this will be automatically inferred)",
            )

            num_tokens = gr.Textbox(
                label="Optional: The number of tokens in the prompt (we use this to verify your input, as rare words are sometimes split into more than one token)",
            )

            with gr.Row():
                sketchpad = gr.Sketchpad(label="Sketch Pad (draw each bounding box in a different layer)")
                layout_image = gr.Image(type="pil", label="Bounding Boxes", interactive=False)

            with gr.Row():
                clear_button = gr.Button(value="Clear")
                generate_layout_button = gr.Button(value="Generate layout")
                generate_image_button = gr.Button(value="Generate image")

            with gr.Row():
                out_images = gr.Gallery(type="pil", label="Generated Images", interactive=False)

            with gr.Accordion("Advanced Options", open=False):
                with gr.Column():
                    gr.HTML(ADVANCED_OPTION_DESCRIPTION)
                    batch_size = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Number of samples (limited to one sample on the current Space)")
                    num_guidance_steps = gr.Slider(minimum=5, maximum=20, step=1, value=8, label="Number of timesteps to perform guidance")
                    init_step_size = gr.Slider(minimum=0, maximum=50, step=0.5, value=25, label="Initial step size")
                    final_step_size = gr.Slider(minimum=0, maximum=20, step=0.5, value=10, label="Final step size")
                    num_clusters_per_subject = gr.Slider(minimum=0, maximum=5, step=0.5, value=3, label="Number of clusters per subject")
                    cross_loss_scale = gr.Slider(minimum=0, maximum=2, step=0.1, value=1, label="Cross-attention loss scale factor")
                    self_loss_scale = gr.Slider(minimum=0, maximum=2, step=0.1, value=1, label="Self-attention loss scale factor")
                    num_iterations = gr.Slider(minimum=0, maximum=10, step=1, value=5, label="Number of gradient descent iterations")
                    loss_threshold = gr.Slider(minimum=0, maximum=1, step=0.1, value=0.2, label="Loss threshold")
                    classifier_free_guidance_scale = gr.Slider(minimum=0, maximum=50, step=0.5, value=7.5, label="Classifier-free guidance scale")
                    seed = gr.Slider(minimum=0, maximum=1000, step=1, value=445, label="Random seed")

        boxes = gr.State([])

        clear_button.click(
            clear,
            inputs=[batch_size],
            outputs=[boxes, sketchpad, layout_image, out_images],
            queue=False,
        )

        generate_layout_button.click(
            draw,
            inputs=[sketchpad],
            outputs=[boxes, layout_image],
            queue=False,
        )

        generate_image_button.click(
            fn=generate,
            inputs=[
                prompt, subject_token_indices, filter_token_indices, num_tokens,
                init_step_size, final_step_size, num_clusters_per_subject, cross_loss_scale, self_loss_scale,
                classifier_free_guidance_scale, batch_size, num_iterations, loss_threshold, num_guidance_steps,
                seed,
                boxes,
            ],
            outputs=[out_images],
            queue=True,
        )

        with gr.Column():
            gr.Examples(
                examples=[
                    ["a ginger kitten and a gray puppy in a yard", "2,3;6,7", "1,4,5,8,9", "10"],
                    ["a realistic photo of a highway with a semi trailer and a concrete mixer and a helicopter", "9,10;13,14;17", "1,4,5,7,8,11,12,15,16", "17"],
                ],
                inputs=[prompt, subject_token_indices, filter_token_indices, num_tokens],
            )

        gr.HTML(FOOTNOTE)

    demo.launch(show_api=False, show_error=True)


if __name__ == "__main__":
    main()