import spaces
import gradio as gr
import torch
import numpy as np
import random
import time
import os
from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler
from transformers import AutoTokenizer, Qwen3ForCausalLM
from controlnet_aux.processor import Processor
from PIL import Image
from safetensors.torch import load_file
# Import the pipeline and model classes
# (the videox_fun package must be importable, i.e. on your Python path)
from videox_fun.pipeline import ZImageControlPipeline
from videox_fun.models import ZImageControlTransformer2DModel
# Try to import prompt utility, define fallback if missing
try:
from utils.prompt_utils import polish_prompt
except ImportError:
print("utils.prompt_utils not found. Using passthrough for prompt polishing.")
    def polish_prompt(prompt):
        """Passthrough fallback when the LLM prompt polisher is unavailable."""
        return prompt
# Configuration
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1280
# Paths
MODEL_LOCAL = "models/Z-Image-Turbo/" # Local path or HuggingFace ID
# We prioritize the local safetensors file for ControlNet weights
CONTROLNET_WEIGHTS = "models/Z-Image-Turbo-Fun-Controlnet-Union.safetensors"
print("Loading Z-Image Turbo model...")
device = "cuda" if torch.cuda.is_available() else "cpu"
weight_dtype = torch.bfloat16
# 1. Load Transformer with Control Config
print("Initializing Transformer...")
transformer = ZImageControlTransformer2DModel.from_pretrained(
MODEL_LOCAL,
subfolder="transformer",
transformer_additional_kwargs={
"control_layers_places": [0, 5, 10, 15, 20, 25],
"control_in_dim": 16
},
).to(device, weight_dtype)
# 2. Load ControlNet Weights manually
if os.path.exists(CONTROLNET_WEIGHTS):
print(f"Loading ControlNet weights from {CONTROLNET_WEIGHTS}")
try:
state_dict = load_file(CONTROLNET_WEIGHTS)
# Handle potential nesting of state_dict
state_dict = state_dict.get("state_dict", state_dict)
m, u = transformer.load_state_dict(state_dict, strict=False)
print(f"ControlNet Weights Loaded - Missing keys: {len(m)}, Unexpected keys: {len(u)}")
except Exception as e:
print(f"Error loading ControlNet weights: {e}")
else:
print(f"Warning: ControlNet weights not found at {CONTROLNET_WEIGHTS}. Trying to run without them or using base weights.")
# 3. Load VAE, Tokenizer, Encoder, Scheduler
print("Loading core components...")
vae = AutoencoderKL.from_pretrained(
MODEL_LOCAL,
subfolder="vae",
).to(device, weight_dtype)
tokenizer = AutoTokenizer.from_pretrained(
MODEL_LOCAL,
subfolder="tokenizer"
)
text_encoder = Qwen3ForCausalLM.from_pretrained(
MODEL_LOCAL,
subfolder="text_encoder",
torch_dtype=weight_dtype,
).to(device)
scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
MODEL_LOCAL,
subfolder="scheduler"
)
# 4. Assemble Pipeline
pipe = ZImageControlPipeline(
vae=vae,
tokenizer=tokenizer,
text_encoder=text_encoder,
transformer=transformer,
scheduler=scheduler,
)
pipe.to(device, weight_dtype)
print(f"Model loaded successfully on {device}!")
# --- Helper Functions ---
def rescale_image(image, scale, divisible_by=16):
"""Rescale image and ensure dimensions are divisible by specified value."""
if image is None:
return None, 1024, 1024
width, height = image.size
new_width = int(width * scale)
new_height = int(height * scale)
    # Snap dimensions down to the nearest multiple of divisible_by, flooring
    # at one unit so very small inputs never collapse to a zero-sized image.
    new_width = max((new_width // divisible_by) * divisible_by, divisible_by)
    new_height = max((new_height // divisible_by) * divisible_by, divisible_by)
    # Clamp to MAX_IMAGE_SIZE (itself a multiple of 16, so divisibility holds;
    # note each side clamps independently, which can alter the aspect ratio).
    new_width = min(new_width, MAX_IMAGE_SIZE)
    new_height = min(new_height, MAX_IMAGE_SIZE)
resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
return resized, new_width, new_height
def get_image_latent(image, sample_size):
    """Convert a PIL image to a VAE latent at the given [height, width]."""
    import torchvision.transforms as transforms
    # Honor sample_size (PIL's resize expects (width, height)); this is a
    # no-op when the caller has already resized the image.
    image = image.resize((sample_size[1], sample_size[0]), Image.Resampling.LANCZOS)
    # Normalize to [-1, 1], the input range the VAE expects
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5])
    ])
    img_tensor = transform(image).unsqueeze(0).unsqueeze(2)  # [B, C, 1, H, W]
    img_tensor = img_tensor.to(device, weight_dtype)
    with torch.no_grad():
        latent = pipe.vae.encode(img_tensor).latent_dist.sample()
        # Scale into the diffusion latent space per the VAE config
        latent = latent * pipe.vae.config.scaling_factor
    return latent
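# @spaces.GPU asks Hugging Face Spaces (ZeroGPU) to attach a GPU for the
# duration of this call; it is effectively a no-op when running outside Spaces.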
@spaces.GPU()
def generate_image(
prompt,
negative_prompt="blurry, ugly, bad quality",
input_image=None,
control_mode="Canny",
control_context_scale=0.75,
image_scale=1.0,
num_inference_steps=9,
guidance_scale=1.0,
seed=42,
randomize_seed=True,
is_polish_prompt=True,
progress=gr.Progress(track_tqdm=True)
):
    if not prompt or not prompt.strip():
        raise gr.Error("Please enter a prompt to generate an image.")
# 1. Polish Prompt
final_prompt = prompt
if is_polish_prompt:
progress(0.1, desc="Polishing prompt...")
try:
final_prompt = polish_prompt(prompt)
except Exception as e:
print(f"Prompt polish failed: {e}")
final_prompt = prompt
# 2. Set Seed
if randomize_seed:
seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
# 3. Process Control Image
if input_image is None:
raise gr.Error("Please upload a control image.")
progress(0.2, desc=f"Processing {control_mode}...")
# Map control mode to processor ID
processor_map = {
'Canny': 'canny',
'HED': 'softedge_hed',
'Depth': 'depth_midas',
'MLSD': 'mlsd',
'Pose': 'openpose_full'
}
processor_id = processor_map.get(control_mode, 'canny')
# Initialize processor
try:
processor = Processor(processor_id)
except Exception as e:
print(f"Failed to load processor {processor_id}, falling back to Canny. Error: {e}")
processor = Processor('canny')
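    # Note: Processor(...) may download detector weights (depth, pose, etc.) on
    # first use; caching one instance per mode would avoid re-initialization
    # cost on every generation.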
# Resize input for processing
control_image_rescaled, width, height = rescale_image(input_image, image_scale, 16)
    # Run the detector at 1024x1024 (controlnet_aux detectors generally work best
    # near their training resolution), then resize the control map back to target size
temp_image = control_image_rescaled.resize((1024, 1024))
processed_image_pil = processor(temp_image, to_pil=True)
processed_image_pil = processed_image_pil.resize((width, height))
# Convert to Latent
progress(0.4, desc="Encoding control image...")
    control_image_latent = get_image_latent(
        processed_image_pil,
        sample_size=[height, width]
    )[:, :, 0]  # drop the singleton frame dim: [B, C, 1, H', W'] -> [B, C, H', W']
# 4. Generate
progress(0.5, desc="Generating...")
try:
result = pipe(
prompt=final_prompt,
negative_prompt=negative_prompt,
height=height,
width=width,
generator=generator,
guidance_scale=guidance_scale,
control_image=control_image_latent,
num_inference_steps=num_inference_steps,
control_context_scale=control_context_scale,
)
image = result.images[0]
progress(1.0, desc="Complete!")
return image, seed, processed_image_pil, final_prompt
    except Exception as e:
        raise gr.Error(f"Generation failed: {e}") from e
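# Example of invoking the handler programmatically (a sketch; assumes a local
# "pose.png" exists and that gr.Progress tolerates running outside an event):
#   image, used_seed, control_map, final_prompt = generate_image(
#       "a dancer on a rooftop at dusk",
#       input_image=Image.open("pose.png"),
#       control_mode="Pose",
#   )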
# --- UI Configuration (Apple Style) ---
apple_css = """
.gradio-container {
max-width: 1200px !important;
margin: 0 auto !important;
padding: 48px 20px !important;
font-family: -apple-system, BlinkMacSystemFont, 'Inter', 'Segoe UI', sans-serif !important;
}
.header-container { text-align: center; margin-bottom: 48px; }
.main-title {
font-size: 56px !important; font-weight: 600 !important;
letter-spacing: -0.02em !important; color: #1d1d1f !important;
margin: 0 0 12px 0 !important;
}
.subtitle {
font-size: 21px !important; color: #6e6e73 !important;
margin: 0 0 24px 0 !important;
}
.info-badge {
display: inline-block; background: #0071e3; color: white;
padding: 6px 16px; border-radius: 20px; font-size: 14px;
font-weight: 500; margin-bottom: 16px;
}
textarea {
font-size: 17px !important; border-radius: 12px !important;
border: 1px solid #d2d2d7 !important; padding: 12px 16px !important;
}
textarea:focus {
border-color: #0071e3 !important; box-shadow: 0 0 0 4px rgba(0, 113, 227, 0.15) !important;
outline: none !important;
}
button.primary {
font-size: 17px !important; padding: 12px 32px !important;
border-radius: 980px !important; background: #0071e3 !important;
border: none !important; color: #ffffff !important;
transition: all 0.2s ease !important;
}
button.primary:hover {
background: #0077ed !important; transform: scale(1.02) !important;
}
.footer-text {
text-align: center; margin-top: 48px; font-size: 14px !important;
color: #86868b !important;
}
"""
with gr.Blocks(title="Z-Image Turbo ControlNet", css=apple_css) as demo:
gr.HTML("""
<div class="header-container">
<div class="info-badge">✓ ControlNet Union</div>
<h1 class="main-title">Z-Image Turbo</h1>
<p class="subtitle">Multi-Control Generation with LLM Prompt Polishing</p>
</div>
""")
with gr.Row():
# Left Input Column
with gr.Column(scale=1):
prompt = gr.Textbox(
label="Prompt",
placeholder="Describe the image you want to create...",
lines=3
)
with gr.Row():
is_polish_prompt = gr.Checkbox(label="Polish Prompt with LLM", value=True)
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
negative_prompt = gr.Textbox(
label="Negative Prompt",
value="blurry, ugly, bad quality",
lines=1
)
input_image = gr.Image(
label="Control Image (Required)",
type="pil",
sources=['upload', 'clipboard'],
height=300
)
control_mode = gr.Radio(
choices=["Canny", "Depth", "HED", "MLSD", "Pose"],
value="Canny",
label="Control Mode",
info="Select the type of structure to extract"
)
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
num_inference_steps = gr.Slider(label="Steps", minimum=1, maximum=30, step=1, value=9)
guidance_scale = gr.Slider(label="Guidance", minimum=0.0, maximum=10.0, step=0.1, value=1.0)
with gr.Row():
control_context_scale = gr.Slider(label="Control Strength", minimum=0.0, maximum=1.0, step=0.01, value=0.75)
image_scale = gr.Slider(label="Image Scale", minimum=0.5, maximum=2.0, step=0.1, value=1.0)
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
generate_btn = gr.Button("Generate Image", variant="primary", elem_classes="primary")
# Right Output Column
with gr.Column(scale=1):
output_image = gr.Image(label="Generated Image", type="pil")
with gr.Accordion("Details & Debug", open=True):
polished_prompt_output = gr.Textbox(label="Actual Polished Prompt", interactive=False, lines=2)
with gr.Row():
seed_output = gr.Number(label="Seed Used", precision=0)
control_output = gr.Image(label="Preprocessor Output", type="pil")
# Footer
gr.HTML("""
<div class="footer-text">
Powered by Z-Image Turbo • VideoX-Fun • Tongyi-MAI
</div>
""")
# Event Wiring
generate_btn.click(
fn=generate_image,
inputs=[
prompt, negative_prompt, input_image, control_mode,
control_context_scale, image_scale, num_inference_steps,
guidance_scale, seed, randomize_seed, is_polish_prompt
],
outputs=[output_image, seed_output, control_output, polished_prompt_output]
)
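# share=False is appropriate on Spaces, which already serves the app publicly;
# set share=True only for a temporary public link when running locally.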
if __name__ == "__main__":
demo.launch(share=False)