# Meaning-Machine's picture
# added image.save() using slugify and mkdir
# commit 79c23e4 (verified)
import torch
from diffusers import DiffusionPipeline, AutoencoderKL
import gradio as gr
from pathlib import Path
from slugify import slugify
# Function to load the model.
def load_model():
    """Build the SDXL text-to-image pipeline with the fp16-fix VAE and the
    Mike Stasny LoRA weights, moved to CUDA.

    Returns:
        DiffusionPipeline: a ready-to-use fp16 SDXL pipeline on the GPU.
    """
    # FIX: the original built a full-precision pipeline, loaded LoRA weights
    # into it, and then discarded it unused — wasting download time and
    # several GB of memory. Only the fp16 pipeline below was ever returned.
    #
    # Use the fp16-safe community VAE (the stock SDXL VAE produces artifacts
    # when run in half precision).
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        vae=vae,
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    # NOTE(review): the removed duplicate load used the repo id
    # "Meaning-Machine/old_mike_stasny_LoRA" (capital A); this one uses
    # "..._LoRa". Hub repo ids are case-sensitive — confirm which is canonical.
    pipe.load_lora_weights("Meaning-Machine/old_mike_stasny_LoRa")
    _ = pipe.to("cuda")
    return pipe
def generate_image(prompt, num_inference_steps):
    """Generate an image from *prompt*, save it under ./images/, and return it.

    Args:
        prompt: text prompt for the diffusion model (must be a str).
        num_inference_steps: diffusion step count; accepts either the string
            delivered by the Gradio Textbox or a plain int (generalized —
            the original rejected ints outright).

    Returns:
        PIL.Image.Image: the generated image.

    Raises:
        ValueError: if *prompt* is not a string, or *num_inference_steps*
            cannot be interpreted as an integer.
    """
    # Validate BEFORE loading the model: the original called load_model()
    # first, paying the full (very expensive) pipeline load even for
    # inputs that were about to be rejected.
    if not isinstance(prompt, str):
        raise ValueError("The prompt should be a string.")
    try:
        steps = int(num_inference_steps)
    except (TypeError, ValueError):
        raise ValueError(
            "The number of inference steps must be an integer or a numeric string."
        )

    # NOTE(review): the model is reloaded on every call; caching the pipeline
    # at module level would make repeat generations much faster.
    pipe = load_model()
    image = pipe(prompt, num_inference_steps=steps).images[0]

    # Persist the result, named after a filesystem-safe slug of the prompt.
    DIR_NAME = "./images/"
    dirpath = Path(DIR_NAME)
    # create parent dir if it doesn't exist
    dirpath.mkdir(parents=True, exist_ok=True)
    image_name = f'{slugify(prompt)}.jpg'
    image_path = dirpath / image_name
    image.save(image_path)
    print(image_name)
    return image
# Build the Gradio UI: two text inputs (prompt + step count) mapped to
# generate_image, which returns a single image.
prompt_box = gr.Textbox(label="Enter a prompt for the image")
steps_box = gr.Textbox(label="Number of Inference Steps min=10 or max=50")

iface = gr.Interface(
    fn=generate_image,
    inputs=[prompt_box, steps_box],
    outputs="image",
    title="Stable Diffusion XL Text2Image Finetune Dreambooth",
    description="Generate images in the style of Mike Stasny from text prompts using Fine-Tuned Stable Diffusion XL.",
)

# Launch the Gradio app with a public share link.
iface.launch(share=True)