Spaces: Running on Zero
Create optimized.py
optimized.py (ADDED, +75 -0)
import os

import gradio as gr
import spaces
import torch
from diffusers import AutoencoderKL, FluxControlNetModel, FluxControlNetPipeline
from PIL import Image

# Hugging Face access token, read from the Space's secrets.
huggingface_token = os.getenv("HUGGINFACE_TOKEN")

# Load the FLUX.1-dev VAE separately so the pipeline decodes with it.
good_vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="vae",
    torch_dtype=torch.bfloat16,
    token=huggingface_token,
).to("cuda")

# Load the upscaler ControlNet and the FLUX pipeline.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler",
    torch_dtype=torch.bfloat16,
)
pipe = FluxControlNetPipeline.from_pretrained(
    "LPX55/FLUX.1-merged_uncensored",
    controlnet=controlnet,
    torch_dtype=torch.bfloat16,
    vae=good_vae,
    token=huggingface_token,
)

# Optional memory optimizations; enable as needed:
# pipe.enable_xformers_memory_efficient_attention()
# pipe.enable_vae_slicing()  # batch processing of VAE
# pipe.enable_model_cpu_offload()  # requires accelerate; do not combine with pipe.to("cuda")

# Prefer the channels-last memory format where supported, then move the pipeline to the GPU.
pipe.to(memory_format=torch.channels_last)
pipe.to("cuda")


@spaces.GPU
def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale):
    # Upscale the control image by the requested factor.
    w, h = control_image.size
    control_image = control_image.resize((int(w * scale), int(h * scale)), Image.BICUBIC)
    print(f"Size to: {control_image.size[0]}, {control_image.size[1]}")

    # Run the ControlNet-guided upscaling pass at the new resolution.
    image = pipe(
        prompt=prompt,
        control_image=control_image,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        num_inference_steps=steps,
        guidance_scale=guidance_scale,
        height=control_image.size[1],
        width=control_image.size[0],
    ).images[0]
    torch.cuda.empty_cache()
    return image


# Create the Gradio interface.
iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
        gr.Slider(1, 3, value=1, label="Scale"),
        gr.Slider(6, 30, value=8, label="Steps"),
        gr.Image(type="pil", label="Control Image"),
        gr.Slider(0, 1, value=0.6, label="ControlNet Scale"),
        gr.Slider(1, 20, value=3.5, label="Guidance Scale"),
    ],
    outputs=[
        gr.Image(type="pil", label="Generated Image", format="png"),
    ],
    title="FLUX ControlNet Image Generation",
    description="Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.",
)

# Launch the app
iface.launch()
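A minimal smoke-test sketch for the function above, assuming a local file named input.png and a GPU with enough memory; it calls generate_image directly instead of going through the Gradio UI (outside a ZeroGPU Space the @spaces.GPU decorator should act as a pass-through). The file names, prompt, and parameter values below are placeholders, not part of the Space:

# Hypothetical local check (not part of optimized.py): run one upscale pass on a file from disk.
from PIL import Image

control = Image.open("input.png").convert("RGB")  # assumed input path
result = generate_image(
    prompt="a sharp, detailed photograph",
    scale=2,
    steps=8,
    control_image=control,
    controlnet_conditioning_scale=0.6,
    guidance_scale=3.5,
)
result.save("upscaled.png")  # assumed output path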