import os
os.environ["GRADIO_DISABLE_OAUTH"] = "1"
import gradio as gr
import numpy as np
import random
import torch
from diffusers import DiffusionPipeline
from PIL import Image
# ---------------------------------------------------------
# Device
# ---------------------------------------------------------
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
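# float16 roughly halves GPU memory use; CPU inference needs full float32.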
# ---------------------------------------------------------
# Models
# ---------------------------------------------------------
BASE_MODEL = "stabilityai/stable-diffusion-2-1"
LORA_PATH = "bayndrysf/dreambooth-project-style"
# ---------------------------------------------------------
# Load pipeline
# ---------------------------------------------------------
pipe = DiffusionPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=dtype,
    use_auth_token=True
)
if device == "cuda":
    # xFormers attention lowers GPU memory use during inference; requires the xformers package.
    pipe.enable_xformers_memory_efficient_attention()
pipe.to(device)
pipe.load_lora_weights(
    LORA_PATH,
    weight_name="pytorch_lora_weights.safetensors"
)
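# The LoRA adapter is applied on top of the base SD 2.1 weights.
# "pytorch_lora_weights.safetensors" is the filename the diffusers DreamBooth/LoRA
# training scripts write by default (an assumption about how this checkpoint was saved).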
# ---------------------------------------------------------
# Constants
# ---------------------------------------------------------
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
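# MAX_SEED is the largest 32-bit signed integer; it bounds both the seed slider
# and the random seed drawn in infer().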
# ---------------------------------------------------------
# Inference
# ---------------------------------------------------------
def infer(
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps
):
    # Draw a fresh seed when requested so repeated runs produce different images.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # A seeded generator makes the output reproducible for a given seed.
    generator = torch.Generator(device=device).manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt if negative_prompt else None,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=1
    ).images[0]
    return image
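# Minimal sketch of calling infer() directly as a local smoke test, without the UI.
# Not part of the original app; uncomment to try it.
#
# test_image = infer(
#     prompt="A whirling dervish performing in a historic Istanbul courtyard, captured in the iconic style of Ara Güler.",
#     negative_prompt="",
#     seed=42,
#     randomize_seed=False,
#     width=512,
#     height=512,
#     guidance_scale=7.5,
#     num_inference_steps=25,
# )
# test_image.save("smoke_test.png")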
# ---------------------------------------------------------
# Examples
# ---------------------------------------------------------
examples = [
"A whirling dervish performing in a historic Istanbul courtyard, captured in the iconic style of Ara Güler.",
"An elderly man sipping tea at a street café in Istanbul, captured in the iconic style of Ara Güler.",
"A group of friends enjoying a ferry ride on the Bosphorus, captured in the iconic style of Ara Güler.",
]
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
power_device = "GPU" if device == "cuda" else "CPU"
# ---------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Ara Güler's Istanbul: Image Generation with Stable Diffusion
        Currently running on **{power_device}**
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(
                label="Randomize seed",
                value=True
            )
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=512,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=7.5,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=10,
                    maximum=50,
                    step=1,
                    value=25,
                )

        gr.Examples(
            examples=examples,
            inputs=[prompt]
        )

    run_button.click(
        fn=infer,
        inputs=[
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps
        ],
        outputs=[result]
    )
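# queue() makes concurrent requests wait in line instead of running at once on the single pipeline.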
demo.queue().launch()