Spaces:
Running
Running
File size: 1,407 Bytes
1b9b37d e50f297 9dc50fa e50f297 9dc50fa e50f297 9dc50fa 1b9b37d 9dc50fa |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 |
import os
import torch
import gradio as gr
from diffusers import StableDiffusionPipeline
# Hugging Face model ID; override with the MODEL_ID environment variable.
MODEL_ID = os.getenv("MODEL_ID", "stabilityai/stable-diffusion-2-1")
# Run on GPU when one is available, otherwise fall back to CPU.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# -------------------------
# Load Model
# -------------------------
def load_pipeline():
    """Build the Stable Diffusion pipeline and move it to the target device.

    Uses half precision on CUDA and full precision on CPU, then returns
    the ready-to-use pipeline.
    """
    print(f"Loading model: {MODEL_ID} on {DEVICE}")
    # fp16 halves memory on GPU; CPU inference requires fp32.
    dtype = torch.float16 if DEVICE == "cuda" else torch.float32
    pipeline = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=dtype)
    return pipeline.to(DEVICE)
pipe = load_pipeline()
# -------------------------
# Inference Function
# -------------------------
def generate(prompt, num_inference_steps=25, guidance_scale=7.5):
    """Generate an image from a text prompt using the module-level pipeline.

    Parameters
    ----------
    prompt : str or None
        Text description of the desired image.
    num_inference_steps : int, optional
        Number of denoising steps; more steps trade speed for quality
        (default 25, matching the original hard-coded value).
    guidance_scale : float, optional
        Classifier-free guidance strength (default 7.5, matching the
        original hard-coded value).

    Returns
    -------
    tuple
        (status message, image). The image is None when the prompt is
        missing or whitespace-only.
    """
    # Reject empty/whitespace-only prompts before touching the model.
    if not prompt or not prompt.strip():
        return "Please enter a valid prompt.", None
    print("Running inference...")
    result = pipe(
        prompt=prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )
    image = result.images[0]
    return f"Generated image for: {prompt}", image
# -------------------------
# Gradio UI
# -------------------------
# Build the UI components, then wire them into a single-function interface.
prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your image prompt...")
status_output = gr.Textbox(label="Status")
image_output = gr.Image(label="Generated Image")

interface = gr.Interface(
    fn=generate,
    inputs=prompt_input,
    outputs=[status_output, image_output],
    title="Prompt Image Editor",
    description="Generate AI images using text prompts.",
)

if __name__ == "__main__":
    # Start the local Gradio server only when run as a script.
    interface.launch()
|