# FLUX.1 ControlNet upscaler — Gradio app for a Hugging Face Space (ZeroGPU).
import torch
import spaces
import os
from diffusers.utils import load_image
from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
from transformers import T5EncoderModel
from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
from peft import PeftModel, PeftConfig
import gradio as gr
# Hugging Face access token used for gated/private model downloads.
# NOTE(review): the env var name "HUGGINFACE_TOKEN" looks like a typo of
# "HUGGINGFACE_TOKEN" — confirm the deployment secret uses this exact spelling
# before renaming it here.
huggingface_token = os.getenv("HUGGINFACE_TOKEN")
# Load the secondary T5 text encoder in 8-bit (bitsandbytes) to reduce VRAM use.
quant_config = TransformersBitsAndBytesConfig(load_in_8bit=True,)
text_encoder_2_8bit = T5EncoderModel.from_pretrained(
    "LPX55/FLUX.1-merged_uncensored",
    subfolder="text_encoder_2",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
    token=huggingface_token
)
# bf16 VAE taken from the base FLUX.1-dev repo and used in place of the merged
# checkpoint's VAE when decoding.
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16, token=huggingface_token).to("cuda")
# Load pipeline: upscaler ControlNet + merged FLUX checkpoint, with the
# quantized text encoder and the replacement VAE wired in.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler",
    torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "LPX55/FLUX.1-merged_uncensored",
    controlnet=controlnet,
    torch_dtype=torch.bfloat16,
    vae=good_vae,
    text_encoder_2=text_encoder_2_8bit,
    token=huggingface_token
)
# Three LoRA adapters: Turbo (enables low step counts), realism, and "enhance".
adapter_id = "alimama-creative/FLUX.1-Turbo-Alpha"
adapter_id2 = "XLabs-AI/flux-RealismLora"
adapter_id3 = "enhanceaiteam/Flux-uncensored-v2"
pipe.to("cuda")
pipe.load_lora_weights(adapter_id, adapter_name="turbo")
pipe.load_lora_weights(adapter_id2, adapter_name="real")
pipe.load_lora_weights(adapter_id3, weight_name="lora.safetensors", adapter_name="enhance")
# Blend the adapters at the given weights, bake them into the base weights,
# then drop the LoRA modules so inference pays no per-step adapter overhead.
pipe.set_adapters(["turbo", "real", "enhance"], adapter_weights=[0.9, 0.66, 0.6])
pipe.fuse_lora(adapter_names=["turbo", "real", "enhance"], lora_scale=1.0)
pipe.unload_lora_weights()
# pipe.enable_xformers_memory_efficient_attention()
# save to the Hub
# pipe.push_to_hub("fused-t-r")
@spaces.GPU
def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale):
    """Upscale/regenerate an image with the global FLUX ControlNet pipeline.

    Args:
        prompt: Text prompt guiding generation.
        scale: Upscale factor applied to the control image (UI range 1-3).
        steps: Number of diffusion inference steps.
        control_image: Image input accepted by ``diffusers.utils.load_image``
            (PIL image, local path, or URL).
        controlnet_conditioning_scale: Strength of the ControlNet conditioning.
        guidance_scale: Classifier-free guidance scale.

    Returns:
        The generated PIL image.
    """
    # Local import: Pillow is already a transitive dependency of diffusers.
    from PIL import Image

    control_image = load_image(control_image)
    w, h = control_image.size
    # Round the upscaled size down to a multiple of 16: FLUX latent packing
    # requires height/width divisible by 16, and unaligned sizes make the
    # pipeline warn or fail. Already-aligned inputs are unaffected.
    target_w = max(16, int(w * scale) // 16 * 16)
    target_h = max(16, int(h * scale) // 16 * 16)
    # Named resampling filter instead of the former magic number (2 == BILINEAR).
    control_image = control_image.resize(
        (target_w, target_h), resample=Image.Resampling.BILINEAR
    )
    print(f"Size to: {control_image.size[0]}, {control_image.size[1]}")
    with torch.inference_mode():
        image = pipe(
            prompt=prompt,
            control_image=control_image,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            num_inference_steps=steps,
            guidance_scale=guidance_scale,
            height=control_image.size[1],
            width=control_image.size[0]
        ).images[0]
    return image
# ---- Gradio UI -------------------------------------------------------------
# Layout: input/output image panes on top, prompt + tuning sliders below.
with gr.Blocks(title="FLUX ControlNet Image Generation", fill_height=True) as iface:
    gr.Markdown("# FLUX ControlNet Image Generation")
    gr.Markdown("Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.")

    # Top row: the source image beside the result.
    with gr.Row():
        input_image = gr.Image(type="pil", label="Control Image", show_label=False)
        output_image = gr.Image(type="pil", label="Generated Image", format="png", show_label=False)

    # Bottom row: prompt and trigger on the left, sliders on the right.
    with gr.Row():
        with gr.Column(scale=1):
            prompt_box = gr.Textbox(lines=4, placeholder="Enter your prompt here...", label="Prompt")
            run_button = gr.Button("Generate Image", variant="primary")
        with gr.Column(scale=1):
            upscale_factor = gr.Slider(1, 3, value=1, label="Scale")
            num_steps = gr.Slider(6, 30, value=8, label="Steps")
            cfg_scale = gr.Slider(1, 20, value=3.5, label="Guidance Scale")
            cn_strength = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")

    with gr.Row():
        gr.Markdown("**Tips:** Lorum ipsum")

    # Wire the button; input order must match the generate_image signature:
    # (prompt, scale, steps, control_image, controlnet_conditioning_scale,
    # guidance_scale).
    run_button.click(
        fn=generate_image,
        inputs=[prompt_box, upscale_factor, num_steps, input_image, cn_strength, cfg_scale],
        outputs=[output_image],
    )

# Launch the app
iface.launch()