File size: 1,561 Bytes
69b0beb
ec0e8d2
6cee381
 
a766cbf
7bed62a
3531271
a766cbf
f8e211a
a3b39c1
a766cbf
 
 
 
69b0beb
a766cbf
 
 
69b0beb
a766cbf
09edf95
 
 
a766cbf
69b0beb
a766cbf
69b0beb
09b4503
a766cbf
09b4503
37d9a03
a766cbf
69b0beb
a766cbf
b2b249d
f8e211a
a766cbf
 
 
 
 
 
7ac803c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
import gradio as gr

import subprocess
import sys

# Install runtime dependencies at startup (typical for a hosted demo space
# with no requirements file).
# Fix: invoke pip as `python -m pip` via sys.executable so the packages are
# installed into the interpreter actually running this script — a bare "pip"
# on PATH may belong to a different environment. Also batch all five packages
# into a single call instead of five separate pip processes.
subprocess.check_call([
    sys.executable, "-m", "pip", "install",
    "safetensors",
    "transformers",
    "torch",
    "diffusers",
    "accelerate",
])

import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Hugging Face Hub identifiers for the model assets.
base = "stabilityai/stable-diffusion-xl-base-1.0"  # SDXL base pipeline (config + non-UNet weights)
repo = "ByteDance/SDXL-Lightning"  # repo hosting the distilled few-step UNet checkpoints
ckpt = "sdxl_lightning_2step_unet.safetensors" # Use the correct ckpt for your step setting!

# Load model.
# Build the SDXL UNet from its config only (no pretrained weights), then load
# the distilled SDXL-Lightning 2-step weights downloaded from the Hub.
unet = UNet2DConditionModel.from_config(base, subfolder="unet")
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt)))
# Assemble the full SDXL pipeline around the Lightning UNet in fp16.
# NOTE(review): the upstream SDXL-Lightning example also moves the unet and
# pipeline to "cuda"; without a .to("cuda") this runs on CPU — confirm the
# intended device (fp16 on CPU is very slow and may be unsupported).
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16")
# Per the SDXL-Lightning usage notes, the sampler must use trailing
# timestep spacing for the few-step checkpoints to produce correct images.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

def generate_image(text):
    """Generate an image from *text* with the 2-step Lightning pipeline.

    Args:
        text: Prompt describing the desired image (from the Gradio textbox).

    Returns:
        Path to the saved PNG ("output.png"), matching the
        gr.Image(type="filepath") output component.
    """
    # Bug fix: the original ignored `text` and always used the hard-coded
    # prompt "krishna", so every request produced the same image.
    # num_inference_steps=2 matches the 2-step Lightning UNet checkpoint;
    # guidance_scale=0 disables classifier-free guidance as the distilled
    # model expects.
    image = pipe(text, num_inference_steps=2, guidance_scale=0).images[0]
    image.save("output.png")
    return "output.png"

# Wire up the web UI: one multi-line textbox in, one generated image out.
interface_config = dict(
    fn=generate_image,
    inputs=gr.Textbox(lines=5, label="Enter a description for the image"),
    outputs=gr.Image(type="filepath", label="Generated Image"),
    title="Text to Image Generation",
    description="Enter a text description and get an image.",
    theme="compact",
)
iface = gr.Interface(**interface_config)

# Start the web server (blocks until the app is stopped).
iface.launch()