File size: 1,426 Bytes
b5a6313
 
 
 
c6db39f
09fc204
c6db39f
b5a6313
c6db39f
 
 
947e82f
 
c6db39f
 
 
947e82f
c6db39f
 
947e82f
 
 
c6db39f
947e82f
c6db39f
 
947e82f
c6db39f
 
 
947e82f
c6db39f
 
 
947e82f
c6db39f
 
947e82f
c6db39f
947e82f
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Load Stable Diffusion v1.5 once at module import and pin inference to CPU.
# NOTE(review): from_pretrained downloads weights on first run — network
# access and a writable HF cache are assumed; confirm for the deploy target.
model_id = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cpu")

# Inference function with portrait image size
def generate(prompt, negative, steps, scale, seed):
    """Generate one 512x768 (portrait) image from a text prompt.

    Args:
        prompt: Text description of the desired image.
        negative: Negative prompt (features to steer away from).
        steps: Number of denoising steps. Cast to int because Gradio
            sliders deliver floats, while diffusers requires an integer.
        scale: Classifier-free guidance scale.
        seed: RNG seed. Cast to int because torch.Generator.manual_seed
            rejects non-integer values.

    Returns:
        The generated image (first entry of the pipeline's ``images``).
    """
    # Seed a CPU generator so identical inputs reproduce the same image.
    generator = torch.Generator("cpu").manual_seed(int(seed))
    image = pipe(
        prompt=prompt,
        negative_prompt=negative,
        height=768,  # portrait: taller than wide
        width=512,
        num_inference_steps=int(steps),
        guidance_scale=scale,
        generator=generator,
    ).images[0]
    return image

# Build Gradio UI
with gr.Blocks() as demo:
    # Heading fixed to match the loaded checkpoint (runwayml/stable-diffusion-v1-5).
    gr.Markdown("### 🎨 Stable Diffusion 1.5 (CPU Optimized Portrait Generator)")

    # Text inputs: what to draw and what to avoid.
    with gr.Row():
        prompt = gr.Textbox(label="Prompt", placeholder="e.g. A fantasy castle on a cliff")
        negative = gr.Textbox(label="Negative Prompt", placeholder="e.g. low quality, blurry")

    # Sampling controls; defaults chosen for reasonable CPU latency.
    with gr.Row():
        steps = gr.Slider(10, 50, value=20, step=1, label="Steps")
        scale = gr.Slider(1, 20, value=7.5, step=0.1, label="Guidance Scale")
        seed = gr.Slider(0, 100000, step=1, value=42, label="Seed", randomize=True)

    run_btn = gr.Button("🎨 Generate Portrait")
    output = gr.Image(label="Result", type="pil")

    # Wire the button to the inference function defined above.
    run_btn.click(fn=generate, inputs=[prompt, negative, steps, scale, seed], outputs=output)

demo.launch(show_api=True)