import argparse
import functools
import os
import time

import gradio as gr
import numpy as np
import spaces
import torch
import torch.nn.functional as F
from diffusers import AutoencoderTiny, FluxPipeline
from transformers import AutoModel, CLIPModel, CLIPProcessor
from transformers.models.clip.modeling_clip import _get_vector_norm

from nunchaku import NunchakuFluxTransformer2dModel
from nunchaku.utils import get_precision

from my_utils.default_values import apply_defaults
from my_utils.group_inference import run_group_inference
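
# Note: `my_utils` is bundled with this repo, and `nunchaku` provides the
# SVDQuant-quantized FLUX kernels; both are assumed to be installed alongside this app.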

# Load the SVDQuant-quantized FLUX.1-schnell transformer at the precision
# supported by the current GPU ("int4" or "fp4").
precision = get_precision()
transformer = NunchakuFluxTransformer2dModel.from_pretrained(
    f"nunchaku-tech/nunchaku-flux.1-schnell/svdq-{precision}_r32-flux.1-schnell.safetensors"
)
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", transformer=transformer, torch_dtype=torch.bfloat16
).to("cuda")

# Swap in the tiny autoencoder (TAEF1) for faster latent decoding.
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")

# Scoring models: CLIP for text-image alignment, DINOv2 for pairwise diversity.
m_clip = CLIPModel.from_pretrained("multimodalart/clip-vit-base-patch32").to("cuda")
prep_clip = CLIPProcessor.from_pretrained("multimodalart/clip-vit-base-patch32")
dino_model = AutoModel.from_pretrained("facebook/dinov2-base").to("cuda")

default_args = argparse.Namespace(
    model_name="flux-schnell",
    prompt=None,
    starting_candidates=None,
    output_group_size=None,
    pruning_ratio=None,
    lambda_score=None,
    seed=None,
    unary_term="clip_text_img",
    binary_term="diversity_dino",
    guidance_scale=None,
    num_inference_steps=None,
    height=None,
    width=None,
)
default_args = apply_defaults(default_args)
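# apply_defaults is assumed to fill every None field above with the repo's
# per-model defaults (steps, guidance, resolution, candidate counts) for flux-schnell.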


@torch.no_grad()
def unary_clip_text_img_score(l_images, target_caption, device="cuda"):
    """Compute CLIP text-image similarity for each image in the list."""
    # CLIP preprocessing statistics.
    _img_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(1, 3, 1, 1).to(device)
    _img_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(1, 3, 1, 1).to(device)

    b_images = torch.cat(l_images, dim=0)
    b_images = F.interpolate(b_images, size=(224, 224), mode="bilinear", align_corners=False)
    b_images = b_images * 0.5 + 0.5  # [-1, 1] -> [0, 1]
    b_images = (b_images - _img_mean) / _img_std

    text_encoding = prep_clip.tokenizer(target_caption, return_tensors="pt", padding=True).to(device)
    # Divide out the learned temperature to recover raw cosine similarities.
    output = m_clip(pixel_values=b_images, **text_encoding).logits_per_image / m_clip.logit_scale.exp()
    return output.view(-1).cpu().numpy()
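
# Shapes (assumed): l_images is a list of n tensors of shape (1, 3, H, W) with values
# in [-1, 1]; the return is an (n,) numpy array of cosine similarities (higher = better).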


@torch.no_grad()
def binary_dino_diversity_score(l_images, device="cuda"):
    """Compute pairwise diversity scores from DINOv2 patch tokens."""
    # ImageNet preprocessing statistics.
    _img_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
    _img_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)

    b_images = torch.cat(l_images, dim=0)
    b_images = F.interpolate(b_images, size=(256, 256), mode="bilinear", align_corners=False)
    b_images = b_images * 0.5 + 0.5  # [-1, 1] -> [0, 1]
    b_images = (b_images - _img_mean) / _img_std
    # Drop the CLS token; keep the per-patch features.
    all_features = dino_model(pixel_values=b_images).last_hidden_state[:, 1:, :].cpu()

    N = len(l_images)
    score_matrix = np.zeros((N, N))
    for i in range(N):
        f1 = all_features[i]
        for j in range(i + 1, N):
            f2 = all_features[j]
            # Mean patch-wise cosine distance: higher means a more diverse pair.
            diversity = (1 - F.cosine_similarity(f1, f2, dim=1)).mean().item()
            score_matrix[i, j] = diversity
    return score_matrix
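
# Only the upper triangle (i < j) is filled; the downstream selection is assumed
# to treat the matrix as symmetric.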


@torch.no_grad()
def binary_dino_cls_score(l_images, device="cuda"):
    """Compute pairwise diversity scores from the DINOv2 CLS token."""
    # ImageNet preprocessing statistics.
    _img_mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
    _img_std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)

    b_images = torch.cat(l_images, dim=0)
    b_images = F.interpolate(b_images, size=(256, 256), mode="bilinear", align_corners=False)
    b_images = b_images * 0.5 + 0.5  # [-1, 1] -> [0, 1]
    b_images = (b_images - _img_mean) / _img_std
    # Keep only the CLS token: a single global descriptor per image.
    all_features = dino_model(pixel_values=b_images).last_hidden_state[:, 0:1, :].cpu()

    N = len(l_images)
    score_matrix = np.zeros((N, N))
    for i in range(N):
        f1 = all_features[i]
        for j in range(i + 1, N):
            f2 = all_features[j]
            diversity = (1 - F.cosine_similarity(f1, f2, dim=1)).mean().item()
            score_matrix[i, j] = diversity
    return score_matrix
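
# Unlike binary_dino_diversity_score, this variant compares one global (CLS)
# descriptor per image, so it is cheaper but less sensitive to local differences.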


@torch.no_grad()
def binary_clip_diversity_score(l_images, device="cuda"):
    """Compute pairwise diversity scores from CLIP image embeddings."""
    # CLIP preprocessing statistics.
    _img_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).view(1, 3, 1, 1).to(device)
    _img_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).view(1, 3, 1, 1).to(device)

    b_images = torch.cat(l_images, dim=0)
    b_images = F.interpolate(b_images, size=(224, 224), mode="bilinear", align_corners=False)
    b_images = b_images * 0.5 + 0.5  # [-1, 1] -> [0, 1]
    b_images = (b_images - _img_mean) / _img_std

    vision_outputs = m_clip.vision_model(
        pixel_values=b_images,
        output_attentions=False,
        output_hidden_states=False,
        interpolate_pos_encoding=False,
        return_dict=True,
    )
    # Project the pooled output and L2-normalize, as in CLIPModel.get_image_features.
    image_embeds = m_clip.visual_projection(vision_outputs[1])
    image_embeds = image_embeds / _get_vector_norm(image_embeds)

    N = len(l_images)
    score_matrix = np.zeros((N, N))
    for i in range(N):
        f1 = image_embeds[i]
        for j in range(i + 1, N):
            f2 = image_embeds[j]
            # Cosine distance between unit-norm embeddings.
            diversity = (1 - torch.dot(f1, f2)).item()
            score_matrix[i, j] = diversity
    return score_matrix
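
# As with the DINO scores, higher entries mean a more diverse pair; CLIP's global
# embeddings tend to capture semantic differences more than low-level layout changes.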


def get_score_functions(unary_term, binary_term, prompt):
    """Return the (unary, binary) scoring functions for the selected terms."""
    # "clip_text_img" is currently the only unary option exposed in the UI.
    unary_score_fn = functools.partial(unary_clip_text_img_score, target_caption=prompt, device="cuda")

    if binary_term == "diversity_dino":
        binary_score_fn = functools.partial(binary_dino_diversity_score, device="cuda")
    elif binary_term == "dino_cls_pairwise":
        binary_score_fn = functools.partial(binary_dino_cls_score, device="cuda")
    elif binary_term == "diversity_clip":
        binary_score_fn = functools.partial(binary_clip_diversity_score, device="cuda")
    else:
        raise ValueError(f"Invalid binary term: {binary_term}")

    return unary_score_fn, binary_score_fn
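
# Usage sketch (hypothetical values):
#   unary_fn, binary_fn = get_score_functions("clip_text_img", "diversity_dino", "A photo of a dog")
#   alignment = unary_fn(l_images)   # (n,) text-image alignment scores
#   diversity = binary_fn(l_images)  # (n, n) pairwise diversity matrix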


@spaces.GPU(duration=300)
def generate_images(prompt, starting_candidates, output_group_size, pruning_ratio,
                    lambda_score, seed, unary_term, binary_term, progress=gr.Progress(track_tqdm=True)):
    """Generate a group of images using group inference with progressive pruning."""
    unary_score_fn, binary_score_fn = get_score_functions(unary_term, binary_term, prompt)

    inference_args = {
        "model_name": "flux-schnell",
        "prompt": prompt,
        "guidance_scale": default_args.guidance_scale,
        "num_inference_steps": default_args.num_inference_steps,
        "max_sequence_length": 256,
        "height": default_args.height,
        "width": default_args.width,
        "unary_score_fn": unary_score_fn,
        "binary_score_fn": binary_score_fn,
        "output_group_size": output_group_size,
        "pruning_ratio": pruning_ratio,
        "lambda_score": lambda_score,
        # One deterministic generator per starting candidate.
        "l_generator": [torch.Generator("cpu").manual_seed(seed + i) for i in range(starting_candidates)],
        "starting_candidates": starting_candidates,
        "skip_first_cfg": True,
    }
    print(f"pruning ratio is: {pruning_ratio}")

    t_start = time.time()
    output_group = run_group_inference(pipe, **inference_args)
    t_end = time.time()
    print(f"Time taken for group inference: {t_end - t_start:.2f} seconds")
    return output_group


css_path = os.path.join(os.path.dirname(__file__), "styles.css")
with open(css_path, "r") as f:
    custom_css = f.read()

js_func = """
function refresh() {
    const url = new URL(window.location);
    if (url.searchParams.get('__theme') !== 'light') {
        url.searchParams.set('__theme', 'light');
        window.location.href = url.href;
    }
}
"""


with gr.Blocks(css=custom_css, js=js_func, theme=gr.themes.Soft(), elem_id="main-container") as demo:
    gr.HTML(
        """
        <div class="title_left">
            <h1>Scaling Group Inference for Diverse and High-Quality Generation</h1>
            <div class="author-container">
                <div class="grid-item cmu"><a href="https://gauravparmar.com/">Gaurav Parmar</a></div>
                <div class="grid-item snap"><a href="https://orpatashnik.github.io/">Or Patashnik</a></div>
                <div class="grid-item snap"><a href="https://scholar.google.com/citations?user=uD79u6oAAAAJ&hl=en">Daniil Ostashev</a></div>
                <div class="grid-item snap"><a href="https://wangkua1.github.io/">Kuan-Chieh (Jackson) Wang</a></div>
                <div class="grid-item snap"><a href="https://kfiraberman.github.io/">Kfir Aberman</a></div>
            </div>
            <div class="author-container">
                <div class="grid-item cmu"><a href="https://www.cs.cmu.edu/~srinivas/">Srinivasa Narasimhan</a></div>
                <div class="grid-item cmu"><a href="https://www.cs.cmu.edu/~junyanz/">Jun-Yan Zhu</a></div>
            </div>
            <br>
            <div class="affiliation-container">
                <div class="grid-item cmu"><p>Carnegie Mellon University</p></div>
                <div class="grid-item snap"><p>Snap Research</p></div>
            </div>
            <br>
            <h2>DEMO: Text-to-Image Group Inference with FLUX.1-Schnell</h2>
        </div>
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            prompt = gr.Textbox(label="Prompt", placeholder="A photo of a dog", lines=4, value="A photo of a dog")

        with gr.Column(scale=1):
            with gr.Row(elem_id="starting-candidates-row"):
                gr.Text("Starting Candidates:", container=False, interactive=False, scale=5)
                starting_candidates = gr.Number(value=default_args.starting_candidates, precision=0, container=False, show_label=False, scale=1)

            with gr.Row(elem_id="output-group-size-row"):
                gr.Text("Output Group Size:", container=False, interactive=False, scale=5)
                output_group_size = gr.Number(value=default_args.output_group_size, precision=0, container=False, show_label=False, scale=1)

        with gr.Column(scale=1):
            with gr.Accordion("Advanced Options", open=False, elem_id="advanced-options-accordion"):
                with gr.Row():
                    gr.Text("Pruning Ratio:", container=False, interactive=False, elem_id="pruning-ratio-label", scale=3)
                    pruning_ratio = gr.Number(value=default_args.pruning_ratio, precision=2, container=False, show_label=False, scale=1)

                with gr.Row():
                    gr.Text("Lambda:", container=False, interactive=False, elem_id="lambda-label", scale=5)
                    lambda_score = gr.Number(value=default_args.lambda_score, precision=1, container=False, show_label=False, scale=1)

                with gr.Row():
                    gr.Text("Seed:", container=False, interactive=False, elem_id="seed-label", scale=5)
                    seed = gr.Number(value=42, precision=0, container=False, show_label=False, scale=1)

                with gr.Row():
                    gr.Text("Unary:", container=False, interactive=False, elem_id="unary-term-label", scale=2)
                    unary_term = gr.Dropdown(choices=["clip_text_img"], value=default_args.unary_term, container=False, show_label=False, scale=3)

                with gr.Row():
                    gr.Text("Binary:", container=False, interactive=False, elem_id="binary-term-label", scale=2)
                    binary_term = gr.Dropdown(choices=["diversity_dino", "diversity_clip", "dino_cls_pairwise"], value=default_args.binary_term,
                                              container=False, show_label=False, scale=3)

    with gr.Row():
        generate_btn = gr.Button("Generate", variant="primary")

    with gr.Row():
        output_gallery_group = gr.Gallery(label="Group Inference", show_label=True, elem_id="gallery", columns=4, height="auto")

    gr.Examples(
        examples=[
            ["A photo of a dog", 64, 4, 0.5, 1.0, 42, "clip_text_img", "diversity_dino"],
            ["A mountain landscape", 64, 4, 0.5, 1.0, 123, "clip_text_img", "diversity_dino"],
            ["A cat sleeping", 64, 4, 0.5, 1.0, 456, "clip_text_img", "diversity_dino"],
            ["A sunset at the beach", 64, 4, 0.5, 1.0, 789, "clip_text_img", "diversity_dino"],
        ],
        inputs=[prompt, starting_candidates, output_group_size, pruning_ratio, lambda_score, seed, unary_term, binary_term],
        outputs=[output_gallery_group],
        fn=generate_images,
        cache_examples="lazy",
        label="Examples",
    )

    generate_btn.click(
        fn=generate_images,
        inputs=[prompt, starting_candidates, output_group_size, pruning_ratio, lambda_score, seed, unary_term, binary_term],
        outputs=[output_gallery_group],
    )

demo.launch()
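
# To run locally (assumes a CUDA GPU plus the nunchaku and my_utils dependencies):
#   python app.py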