gaparmar committed
Commit f197aad · Parent: 2bba48b

fix rho in the examples
app.py CHANGED

@@ -19,7 +19,6 @@ import argparse
 precision = get_precision()
 transformer = NunchakuFluxTransformer2dModel.from_pretrained(f"nunchaku-tech/nunchaku-flux.1-schnell/svdq-{precision}_r32-flux.1-schnell.safetensors")
 pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
-# pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1").to("cuda")
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")
 
 m_clip = CLIPModel.from_pretrained("multimodalart/clip-vit-base-patch32").to("cuda")

@@ -157,7 +156,7 @@ def get_score_functions(unary_term, binary_term, prompt):
     return unary_score_fn, binary_score_fn
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=200)
 def generate_images(prompt, starting_candidates, output_group_size, pruning_ratio,
                     lambda_score, seed, unary_term, binary_term, progress=gr.Progress(track_tqdm=True)):
     """Generate images using group inference with progressive pruning."""

@@ -283,10 +282,10 @@ with gr.Blocks(css=custom_css, js=js_func, theme=gr.themes.Soft(), elem_id="main
 
     gr.Examples(
         examples=[
-            ["A photo of a dog",
-            ["A mountain landscape",
-            ["A cat sleeping",
-            ["A sunset at the beach",
+            ["A photo of a dog", 32, 4, 0.9, 1.0, 42, "clip_text_img", "diversity_dino"],
+            ["A mountain landscape", 32, 4, 0.9, 1.0, 123, "clip_text_img", "diversity_dino"],
+            ["A cat sleeping", 32, 4, 0.9, 1.0, 456, "clip_text_img", "diversity_dino"],
+            ["A sunset at the beach", 32, 4, 0.9, 1.0, 789, "clip_text_img", "diversity_dino"],
         ],
         inputs=[prompt, starting_candidates, output_group_size, pruning_ratio, lambda_score, seed, unary_term, binary_term],
         outputs=[output_gallery_group],
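For context on the second hunk: on ZeroGPU Spaces, the spaces.GPU decorator attaches a GPU only for the duration of the decorated call, and duration caps how many seconds that call may hold it (here 200). Below is a minimal sketch of the pattern, with an illustrative function rather than this app's real one:

import spaces  # ZeroGPU helper, preinstalled on ZeroGPU Spaces
import torch
from diffusers import FluxPipeline

# Model loads at startup; on ZeroGPU the actual CUDA placement is deferred
# until a GPU is attached to a @spaces.GPU-decorated call.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

@spaces.GPU(duration=200)  # hold the GPU for at most 200 seconds per call
def generate(prompt):
    # Runs with a GPU attached; exceeding the duration aborts the call.
    return pipe(prompt, num_inference_steps=4).images[0]

Shorter durations generally get scheduled sooner on ZeroGPU, so the value is usually set just above the slowest expected call.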
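Finally, the change the commit message refers to: gr.Examples matches each example row positionally against its inputs list, so the fourth value in every row (0.9) populates pruning_ratio, presumably the "rho" being fixed. A self-contained sketch of that positional mapping, with toy components and a stub in place of this Space's real generate_images:

import gradio as gr

# Stub standing in for generate_images: it just echoes what it received,
# to show which slot of an Examples row feeds which parameter.
def generate_images(prompt, starting_candidates, output_group_size, pruning_ratio):
    return (f"prompt={prompt!r}, candidates={starting_candidates}, "
            f"group={output_group_size}, rho={pruning_ratio}")

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    starting_candidates = gr.Slider(1, 64, value=32, step=1, label="Starting candidates")
    output_group_size = gr.Slider(1, 8, value=4, step=1, label="Output group size")
    pruning_ratio = gr.Slider(0.0, 1.0, value=0.9, label="Pruning ratio (rho)")
    result = gr.Textbox(label="Result")
    gr.Button("Run").click(
        generate_images,
        inputs=[prompt, starting_candidates, output_group_size, pruning_ratio],
        outputs=result,
    )
    # Row index 0 -> prompt, 1 -> starting_candidates, 2 -> output_group_size,
    # 3 -> pruning_ratio: the slot this commit corrects.
    gr.Examples(
        examples=[["A photo of a dog", 32, 4, 0.9]],
        inputs=[prompt, starting_candidates, output_group_size, pruning_ratio],
    )

if __name__ == "__main__":
    demo.launch()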