Alexander Bagus committed on
Commit
91266b1
·
1 Parent(s): 3124e5a
Files changed (1) hide show
  1. app.py +37 -41
app.py CHANGED
@@ -89,8 +89,8 @@ def prepare(prompt, is_polish_prompt):
89
  def inference(
90
  prompt,
91
  negative_prompt,
92
- input_image,
93
- image_scale=1.0,
94
  control_mode='Canny',
95
  control_context_scale = 0.75,
96
  seed=42,
@@ -104,10 +104,11 @@ def inference(
104
 
105
  # process image
106
  print("DEBUG: process image")
107
- if input_image is None:
108
- print("Error: input_image is empty.")
109
  return None
110
 
 
111
  # input_image, width, height = scale_image(input_image, image_scale)
112
  # control_mode='HED'
113
  processor_id = 'canny'
@@ -124,27 +125,29 @@ def inference(
124
  processor = Processor(processor_id)
125
 
126
  # Width must be divisible by 16
127
- control_image, width, height = image_utils.rescale_image(input_image, image_scale, 16)
128
- control_image = control_image.resize((1024, 1024))
 
 
129
 
130
  print("DEBUG: control_image_torch")
131
  sample_size = [height, width]
132
- control_image = processor(control_image, to_pil=True)
133
- control_image = control_image.resize((width, height))
134
- control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
135
 
136
- mask_image = None
137
- # inpaint_image = None
138
-
139
  if mask_image is not None:
140
  mask_image = get_image_latent(mask_image, sample_size=sample_size)[:, :1, 0]
141
  else:
142
  mask_image = torch.ones([1, 1, sample_size[0], sample_size[1]]) * 255
143
 
144
- # if inpaint_image is not None:
145
- # inpaint_image = get_image_latent(inpaint_image, sample_size=sample_size)[:, :, 0]
146
- # else:
147
- # inpaint_image = torch.zeros([1, 3, sample_size[0], sample_size[1]])
 
 
148
 
149
  # generation
150
  if randomize_seed: seed = random.randint(0, MAX_SEED)
@@ -157,9 +160,9 @@ def inference(
157
  width=width,
158
  generator=generator,
159
  guidance_scale=guidance_scale,
160
- image = None,
161
  mask_image = mask_image,
162
- control_image=control_image_torch,
163
  num_inference_steps=num_inference_steps,
164
  control_context_scale=control_context_scale,
165
  ).images[0]
@@ -188,18 +191,12 @@ with gr.Blocks() as demo:
188
  gr.HTML(read_file("static/header.html"))
189
  with gr.Row():
190
  with gr.Column():
191
- mask_image = gr.ImageEditor(
192
- height=290,
193
- sources=['upload', 'clipboard'],
194
- image_mode='RGB',
195
- type="pil", label="Mask Image"
196
- )
197
-
198
- input_image = gr.Image(
199
  height=290,
200
  sources=['upload', 'clipboard'],
201
  image_mode='RGB',
202
- type="pil", label="Upload"
 
203
  )
204
 
205
  prompt = gr.Textbox(
@@ -240,23 +237,23 @@ with gr.Blocks() as demo:
240
  step=0.01,
241
  value=0.75,
242
  )
243
-
244
- with gr.Row():
245
- guidance_scale = gr.Slider(
246
  label="Guidance scale",
247
  minimum=0.0,
248
  maximum=10.0,
249
  step=0.1,
250
  value=1.0,
251
  )
 
 
252
 
253
- image_scale = gr.Slider(
254
- label="Image scale",
255
- minimum=0.5,
256
- maximum=2.0,
257
- step=0.1,
258
- value=1.0,
259
- )
260
 
261
  seed = gr.Slider(
262
  label="Seed",
@@ -274,10 +271,9 @@ with gr.Blocks() as demo:
274
  with gr.Accordion("Preprocessor output", open=False):
275
  control_image = gr.Image(label="Control image", show_label=False)
276
 
277
- gr.Examples(examples=examples, inputs=[input_image, prompt, control_mode])
278
  gr.Markdown(read_file("static/footer.md"))
279
 
280
- mask_image.upload(fn=lambda x: x, inputs=[mask_image], outputs=[input_image])
281
  run_button.click(
282
  fn=prepare,
283
  inputs=[prompt, is_polish_prompt],
@@ -288,8 +284,8 @@ with gr.Blocks() as demo:
288
  inputs=[
289
  polished_prompt,
290
  negative_prompt,
291
- input_image,
292
- image_scale,
293
  control_mode,
294
  control_context_scale,
295
  seed,
 
89
  def inference(
90
  prompt,
91
  negative_prompt,
92
+ edit_dict,
93
+ # image_scale=1.0,
94
  control_mode='Canny',
95
  control_context_scale = 0.75,
96
  seed=42,
 
104
 
105
  # process image
106
  print("DEBUG: process image")
107
+ if edit_dict is None:
108
+ print("Error: edit_dict is empty.")
109
  return None
110
 
111
+ print(edit_dict)
112
  # input_image, width, height = scale_image(input_image, image_scale)
113
  # control_mode='HED'
114
  processor_id = 'canny'
 
125
  processor = Processor(processor_id)
126
 
127
  # Width must be divisible by 16
128
+
129
+ # control_image, width, height = image_utils.rescale_image(input_image, image_scale, 16)
130
+ # control_image = control_image.resize((1024, 1024))
131
+ width, height = edit_dict['background'].size
132
 
133
  print("DEBUG: control_image_torch")
134
  sample_size = [height, width]
135
+ # control_image = processor(control_image, to_pil=True)
136
+ # control_image = control_image.resize((width, height))
137
+ # control_image_torch = get_image_latent(control_image, sample_size=sample_size)[:, :, 0]
138
 
139
+ mask_image = edit_dict['composite']
 
 
140
  if mask_image is not None:
141
  mask_image = get_image_latent(mask_image, sample_size=sample_size)[:, :1, 0]
142
  else:
143
  mask_image = torch.ones([1, 1, sample_size[0], sample_size[1]]) * 255
144
 
145
+ inpaint_image = edit_dict['background']
146
+ if inpaint_image is not None:
147
+ inpaint_image = get_image_latent(inpaint_image, sample_size=sample_size)[:, :, 0]
148
+ else:
149
+ inpaint_image = torch.zeros([1, 3, sample_size[0], sample_size[1]])
150
+
151
 
152
  # generation
153
  if randomize_seed: seed = random.randint(0, MAX_SEED)
 
160
  width=width,
161
  generator=generator,
162
  guidance_scale=guidance_scale,
163
+ image = inpaint_image,
164
  mask_image = mask_image,
165
+ # control_image=control_image_torch,
166
  num_inference_steps=num_inference_steps,
167
  control_context_scale=control_context_scale,
168
  ).images[0]
 
191
  gr.HTML(read_file("static/header.html"))
192
  with gr.Row():
193
  with gr.Column():
194
+ edit_dict = gr.ImageEditor(
 
 
 
 
 
 
 
195
  height=290,
196
  sources=['upload', 'clipboard'],
197
  image_mode='RGB',
198
+ type="PIL",
199
+ label="Mask Image"
200
  )
201
 
202
  prompt = gr.Textbox(
 
237
  step=0.01,
238
  value=0.75,
239
  )
240
+ guidance_scale = gr.Slider(
 
 
241
  label="Guidance scale",
242
  minimum=0.0,
243
  maximum=10.0,
244
  step=0.1,
245
  value=1.0,
246
  )
247
+ # with gr.Row():
248
+
249
 
250
+ # image_scale = gr.Slider(
251
+ # label="Image scale",
252
+ # minimum=0.5,
253
+ # maximum=2.0,
254
+ # step=0.1,
255
+ # value=1.0,
256
+ # )
257
 
258
  seed = gr.Slider(
259
  label="Seed",
 
271
  with gr.Accordion("Preprocessor output", open=False):
272
  control_image = gr.Image(label="Control image", show_label=False)
273
 
274
+ gr.Examples(examples=examples, inputs=[edit_dict, prompt, control_mode])
275
  gr.Markdown(read_file("static/footer.md"))
276
 
 
277
  run_button.click(
278
  fn=prepare,
279
  inputs=[prompt, is_polish_prompt],
 
284
  inputs=[
285
  polished_prompt,
286
  negative_prompt,
287
+ edit_dict,
288
+ # image_scale,
289
  control_mode,
290
  control_context_scale,
291
  seed,