File size: 1,540 Bytes
a98ebd8
a17e33e
dc03b48
a17e33e
 
a98ebd8
a17e33e
891584b
 
 
 
 
 
 
 
a98ebd8
a17e33e
 
891584b
a17e33e
 
891584b
a17e33e
 
 
a98ebd8
a17e33e
 
891584b
 
44552fa
a17e33e
 
891584b
 
 
 
 
 
 
045ba62
891584b
 
a98ebd8
a17e33e
a98ebd8
 
891584b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import gradio as gr
from optimum.intel import OVStableDiffusionPipeline

# 1. Load the OpenVINO optimized model
# INT8-quantized Stable Diffusion v1.5 published under the OpenVINO org on
# the Hugging Face Hub; quantization is what makes CPU-only inference viable.
model_id = "OpenVINO/stable-diffusion-v1-5-int8-ov"

print("Loading model to CPU... (This may take a minute)")

# We explicitly disable the safety checker to avoid the OSError
# (presumably the repo ships no safety-checker weights, so trying to load
# one fails — verify against the model repo contents).
# compile=True compiles the OpenVINO graph at load time rather than lazily
# on the first generation request, moving the one-off cost to startup.
pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id, 
    compile=True,
    safety_checker=None,
    requires_safety_checker=False
)

# 2. Define the Generation Function
def generate_image(prompt, steps, guidance_scale=7.5):
    """Generate a single image from a text prompt.

    Args:
        prompt: Text description of the desired image.
        steps: Number of denoising steps. Cast to int because Gradio
            sliders can deliver floats. For CPU, 4-8 steps is the
            'sweet spot' for speed vs quality.
        guidance_scale: Classifier-free guidance strength. Defaults to
            7.5, the value previously hard-coded here, so existing
            callers (the Gradio click handler) are unaffected. Higher
            values follow the prompt more closely at the cost of
            variety.

    Returns:
        The first generated image from the pipeline output (a PIL image
        for diffusers-style pipelines).
    """
    result = pipe(
        prompt=prompt,
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
    )
    return result.images[0]

# 3. Gradio UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Page header and tagline.
    gr.Markdown("# ⚡ CPU-Powered AI Image Generator")
    gr.Markdown("Running on Intel OpenVINO (Optimized for Hugging Face Free Tier)")

    with gr.Row():
        # Left column: user inputs plus the trigger button.
        with gr.Column():
            prompt_box = gr.Textbox(
                lines=3,
                placeholder="A futuristic city, oil painting style",
                label="What do you want to see?"
            )
            step_slider = gr.Slider(
                1, 12,
                value=4,
                step=1,
                label="Inference Steps (Lower = Faster)"
            )
            run_button = gr.Button("Generate Image", variant="primary")

        # Right column: the rendered result.
        with gr.Column():
            result_image = gr.Image(label="Your Generated Image")

    # Wire the button to the generation function.
    run_button.click(
        fn=generate_image,
        inputs=[prompt_box, step_slider],
        outputs=result_image
    )

if __name__ == "__main__":
    demo.launch()