seregasmirnov committed on
Commit
6958c6d
·
verified ·
1 Parent(s): dc21506

lora model

Browse files
Files changed (1) hide show
  1. app.py +41 -35
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
-
5
  # import spaces #[uncomment to use ZeroGPU]
6
- from diffusers import DiffusionPipeline
 
7
  import torch
8
  from sympy.core.random import choice
9
  from rembg import remove
@@ -14,7 +14,6 @@ MAX_SEED = np.iinfo(np.int32).max
14
  MAX_IMAGE_SIZE = 1024
15
  # model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
16
 
17
-
18
  # @spaces.GPU #[uncomment to use ZeroGPU]
19
  def infer(
20
  model_id,
@@ -35,11 +34,17 @@ def infer(
35
  torch_dtype = torch.float16
36
  else:
37
  torch_dtype = torch.float32
38
- #print(model_id)
39
- pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
 
40
  if model_id == "CompVis/stable-diffusion-v1-4" and lora == "pepe":
41
  lora_id = "seregasmirnov/pepe-lora"
42
- pipe.load_lora_weights(lora_id)
 
 
 
 
 
43
  pipe = pipe.to(device)
44
 
45
  if randomize_seed:
@@ -47,16 +52,28 @@ def infer(
47
 
48
  generator = torch.Generator().manual_seed(seed)
49
 
50
- image = pipe(
51
- prompt=prompt,
52
- negative_prompt=negative_prompt,
53
- guidance_scale=guidance_scale,
54
- num_inference_steps=num_inference_steps,
55
- width=width,
56
- height=height,
57
- generator=generator,
58
- ).images[0]
59
-
 
 
 
 
 
 
 
 
 
 
 
 
60
  if del_back:
61
  image = remove(image)
62
 
@@ -65,6 +82,7 @@ def infer(
65
 
66
 
67
  examples = [
 
68
  "cute animal",
69
  "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
70
  "An astronaut riding a green horse",
@@ -97,7 +115,7 @@ with gr.Blocks(css=css) as demo:
97
  step=0.1,
98
  value=1.0,
99
  visible=False,
100
- info="Adjust LoRA adapter strength"
101
  )
102
 
103
  def setup_lora(sel_model, sel_lora):
@@ -108,12 +126,6 @@ with gr.Blocks(css=css) as demo:
108
  return [gr.Dropdown(choices=["None", "pepe"], info="Choose lora", visible=True), gr.Slider(visible=True)]
109
  else:
110
  return [gr.Dropdown(choices=["None", "pepe"], info="Choose lora", visible=False), gr.Slider(visible=False)]
111
-
112
- def setup_lora_scale(selected_option):
113
- if selected_option == "None":
114
- return gr.Slider(visible=False)
115
- else:
116
- return gr.Slider(visible=True)
117
 
118
  model_id.change(
119
  fn=setup_lora,
@@ -125,12 +137,6 @@ with gr.Blocks(css=css) as demo:
125
  inputs=[model_id, lora],
126
  outputs=[lora, lora_scale])
127
 
128
-
129
- #lora.change(
130
- # fn=setup_lora_scale,
131
- # inputs=lora,
132
- # outputs=lora_scale)
133
-
134
  with gr.Row():
135
  del_back = gr.Checkbox(label="Delete background", value=False)
136
 
@@ -140,7 +146,7 @@ with gr.Blocks(css=css) as demo:
140
  max_lines=1,
141
  placeholder="Enter your prompt",
142
  container=False,
143
- value="cute_animal",
144
  )
145
 
146
  run_button = gr.Button("Run", scale=0, variant="primary")
@@ -153,7 +159,7 @@ with gr.Blocks(css=css) as demo:
153
  max_lines=1,
154
  placeholder="Enter a negative prompt",
155
  visible=True,
156
- value="cat, dog",
157
  )
158
  gr.Examples(examples=neg_examples, inputs=[negative_prompt])
159
 
@@ -173,7 +179,7 @@ with gr.Blocks(css=css) as demo:
173
  minimum=256,
174
  maximum=MAX_IMAGE_SIZE,
175
  step=32,
176
- value=1024, # Replace with defaults that work for your model
177
  )
178
 
179
  height = gr.Slider(
@@ -181,7 +187,7 @@ with gr.Blocks(css=css) as demo:
181
  minimum=256,
182
  maximum=MAX_IMAGE_SIZE,
183
  step=32,
184
- value=1024, # Replace with defaults that work for your model
185
  )
186
 
187
  guidance_scale = gr.Slider(
@@ -189,7 +195,7 @@ with gr.Blocks(css=css) as demo:
189
  minimum=0.0,
190
  maximum=10.0,
191
  step=0.1,
192
- value=7.0,#0.0, # Replace with defaults that work for your model
193
  )
194
 
195
  num_inference_steps = gr.Slider(
@@ -197,7 +203,7 @@ with gr.Blocks(css=css) as demo:
197
  minimum=1,
198
  maximum=50,
199
  step=1,
200
- value=2, # Replace with defaults that work for your model
201
  )
202
 
203
  gr.Examples(examples=examples, inputs=[prompt])
 
1
  import gradio as gr
2
  import numpy as np
3
  import random
 
4
  # import spaces #[uncomment to use ZeroGPU]
5
+ from diffusers import StableDiffusionPipeline
6
+ from peft import PeftModel, PeftConfig
7
  import torch
8
  from sympy.core.random import choice
9
  from rembg import remove
 
14
  MAX_IMAGE_SIZE = 1024
15
  # model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
16
 
 
17
  # @spaces.GPU #[uncomment to use ZeroGPU]
18
  def infer(
19
  model_id,
 
34
  torch_dtype = torch.float16
35
  else:
36
  torch_dtype = torch.float32
37
+
38
+ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch_dtype, use_safetensors=True)
39
+ is_lora = False
40
  if model_id == "CompVis/stable-diffusion-v1-4" and lora == "pepe":
41
  lora_id = "seregasmirnov/pepe-lora"
42
+ pipe.unet = PeftModel.from_pretrained(
43
+ pipe.unet,
44
+ lora_id,
45
+ adapter_name="default"
46
+ )
47
+ is_lora = True
48
  pipe = pipe.to(device)
49
 
50
  if randomize_seed:
 
52
 
53
  generator = torch.Generator().manual_seed(seed)
54
 
55
+ if is_lora:
56
+ image = pipe(
57
+ prompt=prompt,
58
+ negative_prompt=negative_prompt,
59
+ guidance_scale=guidance_scale,
60
+ num_inference_steps=num_inference_steps,
61
+ width=width,
62
+ height=height,
63
+ generator=generator,
64
+ cross_attention_kwargs={"scale": lora_scale}
65
+ ).images[0]
66
+ else:
67
+ image = pipe(
68
+ prompt=prompt,
69
+ negative_prompt=negative_prompt,
70
+ guidance_scale=guidance_scale,
71
+ num_inference_steps=num_inference_steps,
72
+ width=width,
73
+ height=height,
74
+ generator=generator
75
+ ).images[0]
76
+
77
  if del_back:
78
  image = remove(image)
79
 
 
82
 
83
 
84
  examples = [
85
+ "sticker of a happy cat climbing a tree",
86
  "cute animal",
87
  "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
88
  "An astronaut riding a green horse",
 
115
  step=0.1,
116
  value=1.0,
117
  visible=False,
118
+ info="setup lora strength"
119
  )
120
 
121
  def setup_lora(sel_model, sel_lora):
 
126
  return [gr.Dropdown(choices=["None", "pepe"], info="Choose lora", visible=True), gr.Slider(visible=True)]
127
  else:
128
  return [gr.Dropdown(choices=["None", "pepe"], info="Choose lora", visible=False), gr.Slider(visible=False)]
 
 
 
 
 
 
129
 
130
  model_id.change(
131
  fn=setup_lora,
 
137
  inputs=[model_id, lora],
138
  outputs=[lora, lora_scale])
139
 
 
 
 
 
 
 
140
  with gr.Row():
141
  del_back = gr.Checkbox(label="Delete background", value=False)
142
 
 
146
  max_lines=1,
147
  placeholder="Enter your prompt",
148
  container=False,
149
+ value="sticker of a happy cat climbing a tree",
150
  )
151
 
152
  run_button = gr.Button("Run", scale=0, variant="primary")
 
159
  max_lines=1,
160
  placeholder="Enter a negative prompt",
161
  visible=True,
162
+ value="",
163
  )
164
  gr.Examples(examples=neg_examples, inputs=[negative_prompt])
165
 
 
179
  minimum=256,
180
  maximum=MAX_IMAGE_SIZE,
181
  step=32,
182
+ value=512,
183
  )
184
 
185
  height = gr.Slider(
 
187
  minimum=256,
188
  maximum=MAX_IMAGE_SIZE,
189
  step=32,
190
+ value=512,
191
  )
192
 
193
  guidance_scale = gr.Slider(
 
195
  minimum=0.0,
196
  maximum=10.0,
197
  step=0.1,
198
+ value=4.0,
199
  )
200
 
201
  num_inference_steps = gr.Slider(
 
203
  minimum=1,
204
  maximum=50,
205
  step=1,
206
+ value=50,
207
  )
208
 
209
  gr.Examples(examples=examples, inputs=[prompt])