File size: 3,100 Bytes
d7f40e3
439649f
d7f40e3
 
e9ce74f
439649f
66f569d
 
439649f
 
 
 
 
 
 
e9ce74f
 
 
 
 
 
 
 
 
 
439649f
 
 
d7f40e3
e9ce74f
 
 
 
439649f
d7f40e3
e9ce74f
d7f40e3
e9ce74f
d7f40e3
 
 
 
439649f
 
d7f40e3
 
e9ce74f
 
 
 
439649f
e9ce74f
ca35c84
 
 
 
 
 
 
 
 
 
 
 
439649f
 
 
 
e9ce74f
 
439649f
e9ce74f
d7f40e3
 
439649f
 
 
d9dc65b
 
439649f
 
e9ce74f
d7f40e3
e9ce74f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
from functools import lru_cache

import gradio as gr
import torch
from diffusers import DPMSolverMultistepScheduler, EulerDiscreteScheduler, StableDiffusionPipeline

# Model loading — cached so repeated generations with the same settings
# reuse the already-built pipeline instead of re-downloading/re-loading it.
@lru_cache(maxsize=2)
def load_model(model_id: str, scheduler_name: str) -> StableDiffusionPipeline:
    """Load a Stable Diffusion pipeline and install the requested scheduler.

    Args:
        model_id: HuggingFace Hub model identifier
            (e.g. "CompVis/stable-diffusion-v1-4").
        scheduler_name: "DPM" selects DPMSolverMultistepScheduler,
            "Euler" selects EulerDiscreteScheduler; any other value keeps
            the pipeline's default scheduler.

    Returns:
        The pipeline moved to CPU with the chosen scheduler configured.
    """
    pipe = StableDiffusionPipeline.from_pretrained(model_id)  # torch_dtype=torch.float16 for GPU
    pipe.to("cpu")  # switch to "cuda" when a GPU is available

    # Swap in the requested noise scheduler, reusing the current config.
    if scheduler_name == "DPM":
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    elif scheduler_name == "Euler":
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)

    return pipe

# Image generation
def generate_image(
    model_id: str,
    prompt: str,
    negative_prompt: str,
    seed: int,
    guidance_scale: float,
    num_inference_steps: int,
    scheduler_name: str,
    height: int,
    width: int
):
    """Generate one image with Stable Diffusion.

    Args:
        model_id: HuggingFace model identifier passed to ``load_model``.
        prompt: Positive text prompt.
        negative_prompt: Text the model should avoid.
        seed: RNG seed for reproducible results.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps.
        scheduler_name: Scheduler choice, see ``load_model``.
        height: Output image height in pixels.
        width: Output image width in pixels.

    Returns:
        The first generated PIL image.
    """
    # Use a dedicated generator so seeding does not clobber torch's
    # global RNG state (torch.manual_seed would, as a side effect).
    generator = torch.Generator().manual_seed(seed)

    # Load (or reuse) the pipeline
    pipe = load_model(model_id, scheduler_name)

    # Run the diffusion process; .images[0] is the single generated frame
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        height=height,
        width=width
    ).images[0]

    return image

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Домашнее задание 3. Часть 1. Знакомство с Gradio и HuggingFace.")

    with gr.Row():
        model_id = gr.Textbox(label="Model ID", value="CompVis/stable-diffusion-v1-4")
        prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here")
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here")
        seed = gr.Number(label="Seed", value=42, precision=0)
        guidance_scale = gr.Slider(label="Guidance Scale", minimum=1, maximum=20, value=7)
        num_inference_steps = gr.Slider(label="Number of Inference Steps", minimum=1, maximum=50, value=20)
        # FIX: label typo "Sheduler" -> "Scheduler"
        scheduler_name = gr.Dropdown(label="Scheduler", choices=["DPM", "Euler"], value="DPM")
        # FIX: typo "Heigth" -> "Height" (both the variable and the label)
        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=64, value=512)
        width = gr.Slider(label="Width", minimum=256, maximum=1024, step=64, value=512)
        output = gr.Image(label="Generated Image")
        submit = gr.Button("Generate")

    def reset_inputs():
        """Return defaults for every input plus None to clear the output image."""
        return "", "", 42, 7, 20, "DPM", 512, 512, None

    submit.click(
        fn=generate_image,
        inputs=[model_id, prompt, negative_prompt, seed, guidance_scale,
                num_inference_steps, scheduler_name, height, width],
        outputs=output,
    )

    next_generation = gr.Button("Next generation")
    # FIX: reset_inputs returns 9 values but the outputs list had only 8
    # components — the image `output` must be included (cleared with None),
    # otherwise Gradio raises an arity error on click.
    next_generation.click(
        fn=reset_inputs,
        inputs=[],
        outputs=[prompt, negative_prompt, seed, guidance_scale,
                 num_inference_steps, scheduler_name, height, width, output],
    )

# Entry point
if __name__ == "__main__":
    demo.launch()