pva22 committed on
Commit 8ec434c · 1 Parent(s): 6385f26

zero gpu usage activate

Files changed (1)
  1. app.py +64 -116
app.py CHANGED
@@ -2,133 +2,76 @@ import gradio as gr
 import numpy as np
 import random
 
-import spaces  # [uncomment to use ZeroGPU]
 from diffusers import DiffusionPipeline
 import torch
 
 from peft import PeftModel, LoraConfig
-import os
 
-def get_lora_sd_pipeline(
-    ckpt_dir='./lora',
-    base_model_name_or_path=None,
-    dtype=torch.float16,
-    adapter_name="default"
-):
     unet_sub_dir = os.path.join(ckpt_dir, "unet")
     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
-
-    if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None:
-        config = LoraConfig.from_pretrained(text_encoder_sub_dir)
-        base_model_name_or_path = config.base_model_name_or_path
-
-    if base_model_name_or_path is None:
-        raise ValueError("Please specify the base model name or path")
-
-    pipe = DiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype)
-    before_params = pipe.unet.parameters()
-    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
-    pipe.unet.set_adapter(adapter_name)
-    after_params = pipe.unet.parameters()
-    print("Parameters changed:", any(torch.any(b != a) for b, a in zip(before_params, after_params)))
-
-    if os.path.exists(text_encoder_sub_dir):
-        pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name)
-
-    if dtype in (torch.float16, torch.bfloat16):
-        pipe.unet.half()
-        pipe.text_encoder.half()
-
-    return pipe
-
-def process_prompt(prompt, tokenizer, text_encoder, max_length=77):
-    tokens = tokenizer(prompt, truncation=False, return_tensors="pt")["input_ids"]
-    chunks = [tokens[:, i:i + max_length] for i in range(0, tokens.shape[1], max_length)]
-
-    with torch.no_grad():
-        embeds = [text_encoder(chunk.to(text_encoder.device))[0] for chunk in chunks]
-
-    return torch.cat(embeds, dim=1)
-
-def align_embeddings(prompt_embeds, negative_prompt_embeds):
-    max_length = max(prompt_embeds.shape[1], negative_prompt_embeds.shape[1])
-    return torch.nn.functional.pad(prompt_embeds, (0, 0, 0, max_length - prompt_embeds.shape[1])), \
-           torch.nn.functional.pad(negative_prompt_embeds, (0, 0, 0, max_length - negative_prompt_embeds.shape[1]))
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
 
-model_id_default = "sd-legacy/stable-diffusion-v1-5"
-model_dropdown = ['stabilityai/sdxl-turbo', 'CompVis/stable-diffusion-v1-4', 'sd-legacy/stable-diffusion-v1-5']
 
-model_lora_default = "lora"
 
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
 
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
 
-@spaces.GPU  # [uncomment to use ZeroGPU]
 def infer(
     prompt,
-    negative_prompt,
-    randomize_seed,
     width=512,
     height=512,
-    model_repo_id=model_id_default,
     seed=42,
     guidance_scale=7,
     num_inference_steps=20,
-    model_lora_id=model_lora_default,
     lora_scale=0.5,
-    progress=gr.Progress(track_tqdm=True),
-):
-
     if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    # update the pipeline depending on the selected model
-    if model_repo_id != model_id_default:
-        pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
-        prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
-        negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
-        prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
     else:
-        # attach the LoRA adapter
-        pipe = get_lora_sd_pipeline(ckpt_dir='./' + model_lora_id, base_model_name_or_path=model_id_default, dtype=torch_dtype).to(device)
-        prompt_embeds = process_prompt(prompt, pipe.tokenizer, pipe.text_encoder)
-        negative_prompt_embeds = process_prompt(negative_prompt, pipe.tokenizer, pipe.text_encoder)
-        prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
-        print(f"LoRA adapter loaded: {pipe.unet.active_adapters}")
-        print(f"LoRA scale applied: {lora_scale}")
        pipe.fuse_lora(lora_scale=lora_scale)
 
-    # call the pipeline with the precomputed embeddings
-    params = {
-        'prompt_embeds': prompt_embeds,
-        'negative_prompt_embeds': negative_prompt_embeds,
-        'guidance_scale': guidance_scale,
-        'num_inference_steps': num_inference_steps,
-        'width': width,
-        'height': height,
-        'generator': generator,
-    }
-
-    return pipe(**params).images[0], seed
-
-
-examples = [
-    "A Elon Mask lady in a Russian embroidered kaftan is sitting on a beautiful carved veranda, holding a cup to her mouth and drinking tea from the cup. With her other hand, the girl holds a saucer. The cup and saucer are painted with gzhel. Next to the girl on the table stands a samovar, and steam can be seen above it.",
-    "Elon Mask in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An Elon Mask astronaut riding a green horse",
-    "A delicious Elon Mask ceviche cheesecake slice",
-]
 
 css = """
 #col-container {
@@ -139,20 +82,25 @@
 
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
 
 if __name__ == "__main__":
-    demo.launch()
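The deleted process_prompt / align_embeddings pair worked around CLIP's 77-token limit by encoding the prompt in 77-token chunks, concatenating the embeddings, and padding the negative prompt to the same length. A minimal sketch of how these removed helpers were driven (hypothetical prompt strings; pipe is an SD 1.5 pipeline):

    # Encode a prompt longer than CLIP's 77-token window in chunks.
    long_prompt = "a highly detailed painting of " + ", ".join(100 * ["ornament"])
    prompt_embeds = process_prompt(long_prompt, pipe.tokenizer, pipe.text_encoder)
    negative_embeds = process_prompt("blurry, low quality", pipe.tokenizer, pipe.text_encoder)
    prompt_embeds, negative_embeds = align_embeddings(prompt_embeds, negative_embeds)
    # Diffusers accepts precomputed embeddings in place of raw prompt strings.
    image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]

The rewritten version of the file, listed next, drops this path, so long prompts are simply truncated to 77 tokens by the pipeline's own tokenizer.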
 
 import numpy as np
 import random
 
 from diffusers import DiffusionPipeline
 import torch
+import os
 
 from peft import PeftModel, LoraConfig
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
+def get_lora_sd_pipeline(
+    ckpt_dir="./lora",
+    base_model_name_or_path="sd-legacy/stable-diffusion-v1-5",
+    dtype=torch.float16,
+    adapter_name="default",
+):
     unet_sub_dir = os.path.join(ckpt_dir, "unet")
     text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
 
+    pipe = DiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype).to(device)
 
+    if os.path.exists(unet_sub_dir):
+        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
+        print(f"LoRA adapter loaded: {adapter_name}")
 
+    if os.path.exists(text_encoder_sub_dir):
+        pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name)
 
+    return pipe
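The rewritten loader attaches PEFT adapters only when the corresponding subfolder exists. A quick usage sketch (assuming a local ./lora checkpoint with unet/ and, optionally, text_encoder/ subfolders):

    # Hypothetical call; yields the plain base model if ./lora has no adapters.
    pipe = get_lora_sd_pipeline(ckpt_dir="./lora", dtype=torch.float16)
    image = pipe("portrait photo, studio lighting").images[0]
    image.save("lora_sample.png")

Unlike the old version, the new loader no longer resolves the base model from the adapter's LoraConfig, checks that the UNet parameters actually changed, or casts the PEFT-wrapped modules back to half precision.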
 
 
 def infer(
     prompt,
+    negative_prompt="",
+    randomize_seed=False,
     width=512,
     height=512,
+    model_repo_id="sd-legacy/stable-diffusion-v1-5",
     seed=42,
     guidance_scale=7,
     num_inference_steps=20,
+    model_lora_id="lora",
     lora_scale=0.5,
+):
     if randomize_seed:
+        seed = random.randint(0, np.iinfo(np.int32).max)
+
+    generator = torch.manual_seed(seed)
+
+    # Load the base model, or the LoRA-augmented pipeline
+    if model_lora_id != "none":
+        pipe = get_lora_sd_pipeline(ckpt_dir=f"./{model_lora_id}", base_model_name_or_path=model_repo_id)
     else:
+        pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16).to(device)
+
+    # Apply the LoRA weights if available
+    if hasattr(pipe.unet, "fuse_lora"):
         pipe.fuse_lora(lora_scale=lora_scale)
 
+    # Generate the image
+    image = pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
+    ).images[0]
+
+    return image, seed
+
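A direct smoke test of the new infer signature (illustrative values; passing model_lora_id="none" skips the LoRA path):

    # Returns a PIL image plus the seed that was actually used.
    image, used_seed = infer(
        "an astronaut riding a green horse",
        negative_prompt="blurry, low quality",
        randomize_seed=True,
        model_lora_id="none",
    )
    image.save(f"sample_{used_seed}.png")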
 
 css = """
 #col-container {
 
 
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
+        gr.Markdown(" # Text-to-Image with LoRA Support")
+
+        prompt = gr.Text(label="Prompt", placeholder="Enter your prompt")
+        negative_prompt = gr.Text(label="Negative Prompt", placeholder="Optional")
+        width = gr.Slider(256, 1024, value=512, step=64, label="Width")
+        height = gr.Slider(256, 1024, value=512, step=64, label="Height")
+        num_steps = gr.Slider(1, 50, value=20, step=1, label="Steps")
+        guidance = gr.Slider(1, 15, value=7, step=0.1, label="Guidance Scale")
+        lora_scale = gr.Slider(0, 1, value=0.5, step=0.1, label="LoRA Strength")
+        randomize_seed = gr.Checkbox(label="Randomize Seed")
+        result = gr.Image(label="Generated Image")
+
+        run_button = gr.Button("Generate")
+
+        run_button.click(
+            fn=infer,
+            inputs=[prompt, negative_prompt, randomize_seed, width, height, num_steps, guidance, lora_scale],
+            outputs=[result],
+        )
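One wiring caveat: Gradio passes the input components to fn positionally, and the component order above does not match infer's parameter order, so num_steps would bind to model_repo_id, guidance to seed, and lora_scale to guidance_scale; infer also returns two values while outputs lists a single component. A corrected hookup with the same components might look like this:

    # Map the sliders onto infer's keyword arguments and keep only the image.
    run_button.click(
        fn=lambda p, n, r, w, h, s, g, l: infer(
            p, n, r, w, h,
            num_inference_steps=s,
            guidance_scale=g,
            lora_scale=l,
        )[0],
        inputs=[prompt, negative_prompt, randomize_seed, width, height, num_steps, guidance, lora_scale],
        outputs=[result],
    )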
 
 if __name__ == "__main__":
+    demo.launch()
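The commit message mentions ZeroGPU, but the diff removes the import spaces and @spaces.GPU lines that the old file carried. For reference, a minimal sketch of the usual ZeroGPU pattern on Hugging Face Spaces (model name illustrative):

    import spaces
    import torch
    from diffusers import DiffusionPipeline

    # Instantiate once at startup; on ZeroGPU hardware the model is moved to
    # CUDA only while a @spaces.GPU-decorated function is executing.
    pipe = DiffusionPipeline.from_pretrained(
        "sd-legacy/stable-diffusion-v1-5", torch_dtype=torch.float16
    ).to("cuda")

    @spaces.GPU  # reserves a GPU slice for the duration of each call
    def generate(prompt: str):
        return pipe(prompt).images[0]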