# Code-zalov / app.py
# Shinhati2023 — "Rename app.p to app.py" (commit 6835cd6, verified)
import gradio as gr
import torch
from diffusers import ControlNetModel, DiffusionPipeline
# 1. Load the Union ControlNet (Alibaba's exact HuggingFace repo).
# bfloat16 halves the memory footprint vs. float32; assumes the target GPU
# supports bf16 natively — TODO confirm on the deployment hardware.
controlnet = ControlNetModel.from_pretrained(
    "alibaba-pai/Z-Image-Turbo-Fun-Controlnet-Union-2.1",
    torch_dtype=torch.bfloat16
)

# 2. Load the main Z-Image Turbo model, attaching the ControlNet above.
# NOTE(review): `DiffusionPipeline.from_pretrained` forwards extra kwargs to
# the pipeline class resolved from the repo; verify that repo's pipeline
# actually accepts a `controlnet` argument.
pipe = DiffusionPipeline.from_pretrained(
    "alibaba-pai/Z-Image-Turbo",
    controlnet=controlnet,
    torch_dtype=torch.bfloat16
)

# Send it to the GPU (the hardware monitored by your nvidia-smi command).
# This runs at import time, so the module fails to load on a CUDA-less host.
pipe.to("cuda")
def generate_from_sketch(sketch, prompt):
    """Render a user sketch into a final image via the ControlNet pipeline.

    Args:
        sketch: Either a PIL image or a Gradio sketch payload. Newer Gradio
            versions deliver a dict like ``{"composite": ..., "image": ...}``.
        prompt: Text prompt guiding the render.

    Returns:
        The generated PIL image, or ``None`` when no usable sketch was given.
    """
    if not sketch:
        return None

    # Gradio handles sketch inputs slightly differently depending on the
    # version: extract the image whether it's a dict or a direct PIL object.
    if isinstance(sketch, dict):
        # BUG FIX: the original used sketch.get("composite", sketch.get("image")),
        # which returns None (and then crashes on .convert) whenever the
        # "composite" key EXISTS but holds None — exactly what an empty
        # canvas produces. Prefer composite, fall back to image, and bail
        # out cleanly if neither is present.
        raw = sketch.get("composite") or sketch.get("image")
        if raw is None:
            return None
        control_image = raw.convert("RGB")
    else:
        control_image = sketch.convert("RGB")

    # Z-Turbo renders in just 8 steps; 0.85 keeps the sketch's structure
    # dominant without fully overriding the prompt.
    image = pipe(
        prompt=prompt,
        image=control_image,
        num_inference_steps=8,
        controlnet_conditioning_scale=0.85
    ).images[0]
    return image
# 3. Build the UI: a prompt box + drawing canvas on the left, the rendered
# output on the right.
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    gr.Markdown("# MegaGenz: Sketch-to-Reality")
    gr.Markdown("Draw your idea below. This is rendering directly on the dedicated GPU.")
    with gr.Row():
        with gr.Column():
            # Pre-filled with a detailed example prompt so a first-time user
            # gets a good render without typing anything.
            prompt_input = gr.Textbox(
                label="Prompt",
                value="A hyper-realistic medieval knight in heavy steel armor, standing guard in a foggy stone castle courtyard, cinematic lighting, highly detailed"
            )
            # The canvas for you to finger-paint on via your phone.
            # type="pil" asks Gradio for PIL data; depending on the Gradio
            # version the callback may still receive a dict wrapper.
            sketch_input = gr.Sketchpad(label="Draw your sketch here", type="pil")
            btn = gr.Button("Render on GPU", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Final Render")

    # Wire the button: (sketch, prompt) -> rendered image.
    btn.click(
        fn=generate_from_sketch,
        inputs=[sketch_input, prompt_input],
        outputs=output_image
    )

# 4. The Magic Link: bind to the exact port from your screenshot.
# server_name="0.0.0.0" exposes the app on ALL network interfaces (no auth is
# configured here — presumably an upstream proxy handles access; verify),
# and port 8080 routes it directly to n3.abao.ai.
demo.launch(server_name="0.0.0.0", server_port=8080)