# HF Spaces page header scraped into the source — commented out so the module parses:
# akhaliq (HF Staff) — "Upload folder using huggingface_hub" — commit b23e651 (verified)
import spaces
import gradio as gr
import torch
import random
import os
from PIL import Image
# Import from your existing modules
from prompt_check import is_unsafe_prompt
from pe import prompt_template
# Model configuration — every value is overridable via environment variables.
MODEL_PATH = os.environ.get("MODEL_PATH", "Tongyi-MAI/Z-Image-Turbo")
# Feature flags arrive as strings; normalize case so "True"/"TRUE" also enable.
ENABLE_COMPILE = os.environ.get("ENABLE_COMPILE", "true").lower() == "true"
ENABLE_WARMUP = os.environ.get("ENABLE_WARMUP", "true").lower() == "true"
ATTENTION_BACKEND = os.environ.get("ATTENTION_BACKEND", "flash_3")
UNSAFE_MAX_NEW_TOKEN = int(os.environ.get("UNSAFE_MAX_NEW_TOKEN", "10"))
DASHSCOPE_API_KEY = os.environ.get("DASHSCOPE_API_KEY")
HF_TOKEN = os.environ.get("HF_TOKEN")
# BUG FIX: original line carried an unbalanced trailing ")" (SyntaxError).
# None when the env var is unset — downstream code must handle that.
UNSAFE_PROMPT_CHECK = os.environ.get("UNSAFE_PROMPT_CHECK")
# Supported output resolutions, formatted "WIDTHxHEIGHT (aspect ratio)".
# The leading "WxH" token is what a parser should read; the ratio is display-only.
RESOLUTION_SET = [
"1024x1024 (1:1)", "1152x896 (9:7)", "896x1152 (7:9)",
"1152x864 (4:3)", "864x1152 (3:4)",
"1248x832 (3:2)", "832x1248 (2:3)",
"1280x720 (16:9)", "720x1280 (9:16)",
"1344x576 (21:9)", "576x1344 (9:21)"
]
# Example prompts surfaced in the UI's "Example Prompts" accordion
# (Chinese-language samples; each entry is a one-element row for gr.Examples).
EXAMPLE_PROMPTS = [
["一位男士和他的贵宾犬穿着配套的服装参加狗狗秀,室内灯光,背景中有观众。"],
["极具氛围感的暗调人像,一位优雅的中国美女在黑暗的房间里。一束强光通过遮光板,在她的脸上投射出一个清晰的闪电形状的光影,正好照亮一只眼睛。高对比度,明暗交界清晰,神秘感,莱卡相机色调。"],
["一张中景手机自拍照片拍摄了一位留着长黑发的年轻东亚女子在灯光明亮的电梯内对着镜子自拍。她穿着一件带有白色花朵图案的黑色露肩短上衣和深色牛仔裤。她的头微微倾斜,嘴唇嘟起做亲吻状,非常可爱俏皮。她右手拿着一部深灰色智能手机,遮住了部分脸,后置摄像头镜头对着镜子"]
]
# Global variables shared across the app lifecycle.
pipe = None  # diffusion pipeline; set by init_app(), stays None on load failure
prompt_expander = None  # reserved for a prompt-rewriting helper; never assigned in this file
def load_models(model_path, enable_compile=False, attention_backend="native"):
    """Load the Z-Image pipeline in bfloat16 and move it to the GPU.

    ``enable_compile`` and ``attention_backend`` are accepted for interface
    compatibility but are not consumed by this simplified loader.
    """
    print(f"Loading model from {model_path}...")
    # Simplified model loading - in practice you'd use your actual model loading code
    from diffusers import ZImagePipeline
    pipeline = ZImagePipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16)
    return pipeline.to("cuda")
def warmup_model(pipe, resolutions):
    """Run one tiny generation to prime kernels/caches before serving.

    Failures are logged and swallowed on purpose: warmup is best-effort and
    must never block startup. ``pipe`` and ``resolutions`` are kept for
    interface compatibility even though this simplified warmup ignores them.
    """
    print("Quick warmup...")
    try:
        # BUG FIX: the original called the undefined name ``generate_image``
        # (with a ``num_inference_steps`` kwarg matching no function in this
        # file), so warmup always failed with NameError. Call the real
        # ``generate`` entry point; random_seed=False keeps the fixed seed.
        generate(
            prompt="warmup",
            resolution="1024x1024 (1:1)",
            seed=42,
            steps=5,
            random_seed=False,
        )
    except Exception as e:
        print(f"Warmup note: {e}")
    print("Ready.")
@spaces.GPU
def generate(
    prompt,
    resolution="1024x1024 (1:1)",
    seed=42,
    steps=9,
    shift=3.0,
    random_seed=True
):
    """Generate a placeholder image for the given prompt.

    Demo implementation: produces a diagonal RGB gradient instead of calling
    the real diffusion pipeline. ``steps`` and ``shift`` are accepted for
    interface compatibility but unused here.

    Raises:
        gr.Error: if ``prompt`` is empty or whitespace-only.
    """
    if not prompt.strip():
        raise gr.Error("Please enter a prompt")
    # BUG FIX: the original ignored ``resolution`` entirely and always
    # produced 1024x1024. Parse the "WIDTHxHEIGHT (ratio)" label instead,
    # falling back to the old default on any malformed value.
    try:
        width, height = (int(v) for v in resolution.split(" ")[0].split("x"))
    except (ValueError, AttributeError):
        width, height = 1024, 1024
    if random_seed:
        # Overrides the caller-supplied seed with a fresh random one.
        seed = random.randint(1, 1000000)
    # Placeholder output: diagonal RGB gradient. Built row-major with
    # putdata() instead of the original per-pixel putpixel() double loop —
    # identical pixel values, dramatically faster.
    wh = width + height
    pixels = [
        (int(x / width * 255), int(y / height * 255), int((x + y) / wh * 255))
        for y in range(height)
        for x in range(width)
    ]
    image = Image.new("RGB", (width, height))
    image.putdata(pixels)
    return image
def init_app():
    """Load the pipeline once at startup.

    On any failure the global ``pipe`` is reset to None so the UI can still
    come up and report the problem instead of crashing the process.
    """
    global pipe
    try:
        pipe = load_models(MODEL_PATH, enable_compile=ENABLE_COMPILE)
        if ENABLE_WARMUP:
            warmup_model(pipe, RESOLUTION_SET)
    except Exception as exc:
        print(f"✗ Model loading issue: {exc}")
        pipe = None
    else:
        print("✓ Model loaded successfully")
def create_ui():
    """Build and return the Gradio Blocks UI (single-column, mobile-first).

    Layout: header markdown, prompt textbox, resolution dropdown, seed
    number box, generate button, examples accordion, and an output gallery.
    The generate button is wired to :func:`generate`.
    """
    with gr.Blocks(
        title="Z-Image Turbo - AI Image Generator",
        theme=gr.themes.Soft(),
        css="""
        .compact-row { gap: 0.5rem !important; }
        .mobile-optimized { max-width: 100% !important; }
        .card { border-radius: 12px !important; padding: 1.5rem !important; }
        .prompt-box textarea { min-height: 80px !important; }
        .gradio-container { max-width: 1200px !important; margin: auto !important; }
        .gradio-header { text-align: center !important; margin-bottom: 1rem !important; }
        """
    ) as demo:
        # Header Section
        with gr.Row(elem_classes=["mobile-optimized"]):
            gr.Markdown("""
            <div style="text-align: center;">
                <h1 style="margin: 0; font-size: 1.8rem; color: #1a1a1a;">
                    <span style="color: #6366f1;">Z</span>-Image Turbo
                </h1>
                <p style="margin: 0.5rem 0 1rem 0; color: #6b7280; font-size: 1rem;">
                    Efficient AI Image Generation
                </p>
            </div>
            """)
        # Main Content - Single Column Layout for Mobile
        with gr.Column(elem_classes=["mobile-optimized"]):
            # Prompt Input
            with gr.Group(elem_classes=["card"]):
                gr.Markdown("**✨ Describe your vision**")
                prompt_input = gr.Textbox(
                    label="",
                    placeholder="A serene Chinese landscape with mountains and mist...",
                    lines=3,
                    max_lines=6,
                    elem_id="prompt-input"
                )
                # Generation Settings - Compact Layout
                with gr.Row(elem_classes=["compact-row"]):
                    resolution = gr.Dropdown(
                        choices=RESOLUTION_SET,
                        value="1024x1024 (1:1)",
                        label="Resolution",
                        elem_classes=["mobile-optimized"]
                    )
                # Seed Control
                with gr.Row(elem_classes=["compact-row"]):
                    seed_input = gr.Number(
                        label="Seed",
                        value=42,
                        precision=0
                    )
                # Action Buttons
                with gr.Row(elem_classes=["compact-row"]):
                    generate_btn = gr.Button(
                        "Generate Image",
                        variant="primary",
                        size="lg",
                        elem_classes=["mobile-optimized"]
                    )
            # Examples Section
            with gr.Accordion("📝 Example Prompts", open=False):
                gr.Examples(
                    examples=EXAMPLE_PROMPTS,
                    inputs=prompt_input,
                    label=""
                )
            # Output Gallery
            # NOTE(review): ``generate`` returns a single PIL image, but this
            # output is a Gallery, which normally expects a list of images —
            # confirm the pinned Gradio version accepts a bare image here.
            with gr.Group(elem_classes=["card"]):
                gr.Markdown("**🖼 Generated Images**")
                output_gallery = gr.Gallery(
                    label="",
                    columns=[1, 2],  # Responsive columns
                    rows=2,
                    height=500,
                    object_fit="contain",
                    format="png"
                )
        # Define interactions.
        # Only prompt/resolution/seed are wired; ``steps``, ``shift`` and
        # ``random_seed`` fall back to their defaults — and since
        # random_seed defaults to True, the seed box is effectively ignored.
        # NOTE(review): ``api_visibility`` is not an event kwarg in older
        # Gradio releases (they use ``api_name``/``show_api``) — verify the
        # pinned gradio version supports it.
        generate_btn.click(
            generate,
            inputs=[prompt_input, resolution, seed_input],
            outputs=output_gallery,
            api_visibility="public"
        )
    return demo
# Initialize the application at import time so the model is loaded (or its
# failure reported) before the first request arrives.
init_app()

# Build the UI at module scope: HF Spaces imports this module and serves
# the module-level ``demo`` object.
demo = create_ui()

if __name__ == "__main__":
    demo.launch(
        share=True,
        # BUG FIX: the original literal was missing the closing "}" of the
        # dict inside the list, which made the whole module a SyntaxError.
        footer_links=[{"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}],
    )