# Gradio text-to-image demo: Stable Diffusion 2 running on CPU.
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
import os
from PIL import Image
import warnings
warnings.filterwarnings("ignore")
# --- Model configuration -------------------------------------------------
MODEL_NAME = "stabilityai/stable-diffusion-2"
CACHE_DIR = "./model_cache"
os.makedirs(CACHE_DIR, exist_ok=True)

# Load the pipeline for CPU-only inference: full-precision float32 weights
# (half precision is not supported on CPU) and low_cpu_mem_usage so weights
# are streamed in rather than duplicated in RAM during load.
pipe = StableDiffusionPipeline.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float32,
    cache_dir=CACHE_DIR,
    low_cpu_mem_usage=True,
    safety_checker=None,  # Disable safety checker for CPU speed
)
# User-facing notice shown at the top of the UI. NOTE: the policy URL must
# contain no whitespace — the original had a stray space before the closing
# parenthesis, which broke the rendered markdown link.
DISCLAIMER = """
⚠️ **Content Warning**
This app generates images based on text prompts. By using this application, you agree to comply with [HuggingFace's Acceptable Use Policy](https://huggingface.co/policies/acceptable-use).
"""
def generate_image(prompt, width, height, num_inference_steps=20, guidance_scale=7.0):
    """Generate an image from *prompt* with strict input validation.

    Parameters
    ----------
    prompt : str or None
        Text description of the desired image; must be non-empty.
    width, height : int-like
        Requested output size in pixels. Falsy values default to 512; values
        are floored to the nearest multiple of 8 (a Stable Diffusion
        requirement) and clamped to a minimum of 64.
    num_inference_steps : int
        Number of denoising steps.
    guidance_scale : float
        Classifier-free guidance strength.

    Returns
    -------
    tuple
        ``(image, status)`` — a PIL RGB image on success or ``None`` on
        failure, plus a human-readable status string.
    """
    # Guard against both None and whitespace-only prompts before doing any
    # work (the original raised AttributeError on prompt=None because
    # .strip() was called unconditionally, outside the try block).
    if not prompt or not prompt.strip():
        return None, "Error: Prompt cannot be empty"
    try:
        # Coerce dimensions to ints; falsy inputs fall back to 512.
        width = int(width) if width else 512
        height = int(height) if height else 512
        # Dimensions must be divisible by 8; clamp to a minimum of 64 so a
        # tiny input (e.g. width=5) cannot round down to an invalid 0.
        width = max(64, width - (width % 8))
        height = max(64, height - (height % 8))
        with torch.inference_mode():
            # Explicit casts: Gradio Number inputs may arrive as floats.
            result = pipe(
                prompt=str(prompt),
                width=int(width),
                height=int(height),
                num_inference_steps=int(num_inference_steps),
                guidance_scale=float(guidance_scale),
            )
        # Strict output validation before touching result.images[0].
        if not hasattr(result, "images") or not isinstance(result.images, list):
            return None, f"Invalid pipeline output format: {type(result)}"
        if len(result.images) == 0:
            return None, "Pipeline returned empty image list"
        if result.images[0] is None:
            return None, "Pipeline returned None image"
        return result.images[0].convert("RGB"), f"Success ({width}x{height})"
    except Exception as e:
        return None, f"Generation Error: {str(e)}"
# UI configuration: preset label -> (width, height); None marks free-form entry.
ASPECT_RATIOS = {
    "Square (512x512)": (512, 512),
    "Portrait (512x768)": (512, 768),
    "Landscape (768x512)": (768, 512),
    "Phone (384x832)": (384, 832),
    "Custom": None,
}
# --- Gradio front-end -----------------------------------------------------
with gr.Blocks(theme="soft", css=".disclaimer {color: #FF4B2B; font-size: 0.9em;}") as demo:
    gr.Markdown("# 🔞 Text-to-Image Generator\n" + DISCLAIMER)

    with gr.Row():
        # Left column: prompt + dimension controls.
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe your image...",
                lines=3,
            )
            with gr.Row():
                width = gr.Number(value=512, label="Width", interactive=True)
                height = gr.Number(value=512, label="Height", interactive=True)
            aspect_ratio = gr.Dropdown(
                choices=list(ASPECT_RATIOS.keys()),
                value="Square (512x512)",
                label="Aspect Ratio",
            )
            generate_btn = gr.Button("Generate Image", variant="primary")
        # Right column: result image + status line.
        with gr.Column():
            output_image = gr.Image(label="Result", interactive=False)
            error_msg = gr.Textbox(label="Status", value="Ready", interactive=False)

    def _apply_preset(choice):
        """Return replacement width/height Number components for *choice*."""
        if choice == "Custom":
            # Free-form entry: reset both fields to the 512 default.
            return [
                gr.Number(value=512, interactive=True),
                gr.Number(value=512, interactive=True),
            ]
        preset_w, preset_h = ASPECT_RATIOS[choice]
        return [
            gr.Number(value=preset_w, interactive=True),
            gr.Number(value=preset_h, interactive=True),
        ]

    aspect_ratio.change(fn=_apply_preset, inputs=[aspect_ratio], outputs=[width, height])
    # Steps/guidance are not wired into the UI, so generate_image's defaults apply.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, width, height],
        outputs=[output_image, error_msg],
    )

if __name__ == "__main__":
    demo.launch()