# Commit 85c93d5: using runwayml/stable-diffusion-v1-5
import gradio as gr
import torch
from diffusers import OnnxStableDiffusionPipeline
import os
# Redirect the Hugging Face cache to a writable temp directory
os.environ["HF_HOME"] = "/tmp/hf_cache"
# Stable Diffusion v1.5; the ONNX export is loaded below via revision="onnx"
MODEL_ID = "runwayml/stable-diffusion-v1-5"
print(f"Loading ONNX model {MODEL_ID}...")
try:
    # ONNX Runtime pipeline runs faster on CPU than the PyTorch pipeline;
    # torch-specific kwargs (torch_dtype, low_cpu_mem_usage) don't apply here
    pipe = OnnxStableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        revision="onnx",
        provider="CPUExecutionProvider",
    )
    pipe.safety_checker = None  # Disable safety checker for simplicity
    pipe.set_progress_bar_config(disable=True)
    print("ONNX model loaded successfully!")
except Exception as e:
    print(f"Failed to load ONNX model: {e}")
    print("Falling back to regular Stable Diffusion pipeline...")
    from diffusers import StableDiffusionPipeline
    pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True,
    )
    pipe = pipe.to("cpu")
    pipe.safety_checker = None
    pipe.set_progress_bar_config(disable=True)
    print("Model loaded!")
def generate(prompt):
    try:
        result = pipe(
            prompt=prompt,
            num_inference_steps=40,
            num_images_per_prompt=1,
            guidance_scale=7.5,
        )
        return result.images[0]
    except Exception as e:
        print(f"Error during generation: {e}")
        return None
interface = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=3, label="Prompt", placeholder="Enter your image prompt here..."),
    outputs=gr.Image(type="pil", label="Generated Image"),
    title="⚡ ONNX Stable Diffusion",
    description="Faster CPU-based image generation with ONNX optimization",
)
interface.launch()
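
# --- Optional: driving the app programmatically ---
# The Interface above auto-exposes a "/predict" endpoint, so a separate script
# can call it with gradio_client while this app is running. A minimal sketch,
# assuming the default local URL (replace with your Space id if deployed):
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860")  # assumed local address
#     image_path = client.predict(
#         "a watercolor painting of a lighthouse at dawn",  # prompt
#         api_name="/predict",
#     )
#     print("Generated image saved at:", image_path)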