# pva22 / fix / 05a8fb9 — stray commit/VCS residue left above the code;
# preserved here as a comment so the module parses (bare `05a8fb9` is a SyntaxError).
import gradio as gr
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, EulerDiscreteScheduler
import torch
# Model loading
def load_model(model_id: str, scheduler_name: str):
    """Load a Stable Diffusion pipeline and attach the requested scheduler.

    Args:
        model_id: HuggingFace Hub model identifier
            (e.g. "CompVis/stable-diffusion-v1-4").
        scheduler_name: "DPM" selects DPMSolverMultistepScheduler,
            "Euler" selects EulerDiscreteScheduler; any other value keeps
            the pipeline's default scheduler.

    Returns:
        The configured StableDiffusionPipeline, moved to CPU.
    """
    pipe = StableDiffusionPipeline.from_pretrained(model_id)  # torch_dtype=torch.float16
    pipe.to("cpu")  # switch to "cuda" when a GPU is available
    # Scheduler selection: rebuild from the current scheduler's config so
    # model-specific settings carry over.
    if scheduler_name == "DPM":
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    elif scheduler_name == "Euler":
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe
# Image generation
def generate_image(
    model_id: str,
    prompt: str,
    negative_prompt: str,
    seed: int,
    guidance_scale: float,
    num_inference_steps: int,
    scheduler_name: str,
    height: int,
    width: int,
):
    """Generate a single image with Stable Diffusion.

    Args:
        model_id: HuggingFace Hub model identifier passed to load_model.
        prompt: Text prompt describing the desired image.
        negative_prompt: Text describing what to avoid in the image.
        seed: RNG seed for reproducible generation.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        scheduler_name: Scheduler choice forwarded to load_model ("DPM"/"Euler").
        height: Output image height in pixels.
        width: Output image width in pixels.

    Returns:
        The first generated PIL image.
    """
    # Seed a dedicated Generator instead of torch.manual_seed(): same
    # reproducibility, but the process-global RNG state is not clobbered
    # as a side effect of every UI call.
    generator = torch.Generator().manual_seed(seed)
    # Load the model (re-loaded on every call; presumably acceptable for
    # this homework demo — a cache would avoid repeated downloads).
    pipe = load_model(model_id, scheduler_name)
    # Run the pipeline and take the first image of the batch.
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        height=height,
        width=width,
    ).images[0]
    return image
# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Домашнее задание 3. Часть 1. Знакомство с Gradio и HuggingFace.")
    with gr.Row():
        model_id = gr.Textbox(label="Model ID", value="CompVis/stable-diffusion-v1-4")
        prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here")
        seed = gr.Number(label="Seed", value=42, precision=0)
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7)
        num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=1, maximum=50, value=20)
        # Typo fixes in user-facing labels: "Sheduler" -> "Scheduler",
        # "Heigth" -> "Height" (local variable renamed to match).
        scheduler_name = gr.Dropdown(label="Scheduler", choices=["DPM", "Euler"], value="DPM")
        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=512)
        width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=512)

    output = gr.Image(label="Generated Image")
    submit = gr.Button("Generate")

    def reset_inputs():
        # Restore every input to its default; the trailing None clears the
        # generated image component.
        return "", "", 42, 7, 20, "DPM", 512, 512, None

    submit.click(
        fn=generate_image,
        inputs=[model_id, prompt, negative_prompt, seed, guidance_scale,
                num_inference_steps, scheduler_name, height, width],
        outputs=output,
    )

    next_generation = gr.Button("Next generation")
    next_generation.click(
        fn=reset_inputs,
        inputs=[],
        # BUG FIX: reset_inputs returns 9 values but only 8 outputs were
        # listed, so Gradio had no target for the trailing None. Append
        # `output` so clicking "Next generation" also clears the image.
        outputs=[prompt, negative_prompt, seed, guidance_scale,
                 num_inference_steps, scheduler_name, height, width, output],
    )

# Launch
if __name__ == "__main__":
    demo.launch()