WarlordHermes committed on
Commit
e810215
·
verified ·
1 Parent(s): b5bd34c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -36
app.py CHANGED
@@ -92,7 +92,7 @@ from qwenimage.qwen_fa3_processor import QwenDoubleStreamAttnProcessorFA3
92
  dtype = torch.bfloat16
93
 
94
  pipe = QwenImageEditPlusPipeline.from_pretrained(
95
- "FireRedTeam/FireRed-Image-Edit-1.0",
96
  transformer=QwenImageTransformer2DModel.from_pretrained(
97
  "prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19",
98
  torch_dtype=dtype,
@@ -109,6 +109,9 @@ except Exception as e:
109
 
110
  MAX_SEED = np.iinfo(np.int32).max
111
 
 
 
 
112
  def update_dimensions_on_upload(image):
113
  if image is None:
114
  return 1024, 1024
@@ -129,10 +132,12 @@ def update_dimensions_on_upload(image):
129
 
130
  return new_width, new_height
131
 
 
132
  @spaces.GPU
133
  def infer(
134
  images,
135
  prompt,
 
136
  seed,
137
  randomize_seed,
138
  guidance_scale,
@@ -171,7 +176,6 @@ def infer(
171
  seed = random.randint(0, MAX_SEED)
172
 
173
  generator = torch.Generator(device=device).manual_seed(seed)
174
- negative_prompt = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
175
 
176
  width, height = update_dimensions_on_upload(pil_images[0])
177
 
@@ -189,12 +193,11 @@ def infer(
189
 
190
  return result_image, seed
191
 
192
- except Exception as e:
193
- raise e
194
  finally:
195
  gc.collect()
196
  torch.cuda.empty_cache()
197
 
 
198
  @spaces.GPU
199
  def infer_example(images, prompt):
200
  if not images:
@@ -208,6 +211,7 @@ def infer_example(images, prompt):
208
  result, seed = infer(
209
  images=images_list,
210
  prompt=prompt,
 
211
  seed=0,
212
  randomize_seed=True,
213
  guidance_scale=1.0,
@@ -215,6 +219,7 @@ def infer_example(images, prompt):
215
  )
216
  return result, seed
217
 
 
218
  css = """
219
  #col-container {
220
  margin: 0 auto;
@@ -223,16 +228,24 @@ css = """
223
  #main-title h1 {font-size: 2.4em !important;}
224
  """
225
 
 
226
  with gr.Blocks() as demo:
227
  with gr.Column(elem_id="col-container"):
 
228
  gr.Markdown("# **FireRed-Image-Edit-1.0-Fast**", elem_id="main-title")
229
- gr.Markdown("Perform image edits using [FireRed-Image-Edit-1.0](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0) with 4-step fast inference. Open on [GitHub](https://github.com/PRITHIVSAKTHIUR/FireRed-Image-Edit-1.0-Fast)")
 
 
 
 
 
230
 
231
  with gr.Row(equal_height=True):
 
232
  with gr.Column():
 
233
  images = gr.Gallery(
234
  label="Upload Images",
235
- #sources=["upload", "clipboard"],
236
  type="filepath",
237
  columns=2,
238
  rows=1,
@@ -242,45 +255,70 @@ with gr.Blocks() as demo:
242
 
243
  prompt = gr.Text(
244
  label="Edit Prompt",
245
- show_label=True,
246
  max_lines=2,
247
- placeholder="e.g., transform into anime, upscale, change lighting...",
 
 
 
 
 
 
248
  )
249
 
250
  run_button = gr.Button("Edit Image", variant="primary")
251
 
252
  with gr.Column():
253
- output_image = gr.Image(label="Output Image", interactive=False, format="png", height=395)
 
 
 
 
 
 
254
 
255
  with gr.Accordion("Advanced Settings", open=False, visible=False):
256
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
257
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
258
- guidance_scale = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=1.0)
259
- steps = gr.Slider(label="Inference Steps", minimum=1, maximum=50, step=1, value=4)
260
-
261
- gr.Examples(
262
- examples=[
263
- [["examples/1.jpg"], "cinematic polaroid with soft grain subtle vignette gentle lighting white frame handwritten photographed 'Fire-Edit' preserving realistic texture and details."],
264
- [["examples/2.jpg"], "Transform the image into a dotted cartoon style."],
265
- [["examples/3.jpeg"], "Convert it to black and white."],
266
- [["examples/4.jpg", "examples/5.jpg"], "Replace her glasses with the new glasses from image 1."],
267
- [["examples/8.jpg", "examples/9.png"], "Replace the current clothing with the clothing from the reference image 2. Keep the person’s face, hairstyle, body pose, background, lighting, and camera angle unchanged. Ensure the new outfit fits naturally with realistic fabric texture, proper shadows, folds, and accurate proportions. Match the lighting, color tone, and overall style for a seamless and high-quality result."],
268
- [["examples/10.jpg", "examples/11.png"], "Replace the current clothing with the clothing from the reference image 2. Keep the person’s face, hairstyle, body pose, background, lighting, and camera angle unchanged. Ensure the new outfit fits naturally with realistic fabric texture, proper shadows, folds, and accurate proportions. Match the lighting, color tone, and overall style for a seamless and high-quality result."],
269
- ],
270
- inputs=[images, prompt],
271
- outputs=[output_image, seed],
272
- fn=infer_example,
273
- cache_examples=False,
274
- label="Examples"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
275
  )
276
-
277
- gr.Markdown("[*](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0)This is still an experimental Space for FireRed-Image-Edit-1.0.")
278
 
279
- run_button.click(
280
- fn=infer,
281
- inputs=[images, prompt, seed, randomize_seed, guidance_scale, steps],
282
- outputs=[output_image, seed]
283
- )
284
 
285
  if __name__ == "__main__":
286
- demo.queue(max_size=30).launch(css=css, theme=orange_red_theme, mcp_server=True, ssr_mode=False, show_error=True)
 
 
 
 
 
 
 
92
  dtype = torch.bfloat16
93
 
94
  pipe = QwenImageEditPlusPipeline.from_pretrained(
95
+ "FireRedTeam/FireRed-Image-Edit-1.1",
96
  transformer=QwenImageTransformer2DModel.from_pretrained(
97
  "prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19",
98
  torch_dtype=dtype,
 
109
 
110
  MAX_SEED = np.iinfo(np.int32).max
111
 
112
+ DEFAULT_NEGATIVE_PROMPT = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
113
+
114
+
115
  def update_dimensions_on_upload(image):
116
  if image is None:
117
  return 1024, 1024
 
132
 
133
  return new_width, new_height
134
 
135
+
136
  @spaces.GPU
137
  def infer(
138
  images,
139
  prompt,
140
+ negative_prompt,
141
  seed,
142
  randomize_seed,
143
  guidance_scale,
 
176
  seed = random.randint(0, MAX_SEED)
177
 
178
  generator = torch.Generator(device=device).manual_seed(seed)
 
179
 
180
  width, height = update_dimensions_on_upload(pil_images[0])
181
 
 
193
 
194
  return result_image, seed
195
 
 
 
196
  finally:
197
  gc.collect()
198
  torch.cuda.empty_cache()
199
 
200
+
201
  @spaces.GPU
202
  def infer_example(images, prompt):
203
  if not images:
 
211
  result, seed = infer(
212
  images=images_list,
213
  prompt=prompt,
214
+ negative_prompt=DEFAULT_NEGATIVE_PROMPT,
215
  seed=0,
216
  randomize_seed=True,
217
  guidance_scale=1.0,
 
219
  )
220
  return result, seed
221
 
222
+
223
  css = """
224
  #col-container {
225
  margin: 0 auto;
 
228
  #main-title h1 {font-size: 2.4em !important;}
229
  """
230
 
231
+
232
  with gr.Blocks() as demo:
233
  with gr.Column(elem_id="col-container"):
234
+
235
  gr.Markdown("# **FireRed-Image-Edit-1.0-Fast**", elem_id="main-title")
236
+
237
+ gr.Markdown(
238
+ "Perform image edits using "
239
+ "[FireRed-Image-Edit-1.0](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0)"
240
+ " with 4-step fast inference."
241
+ )
242
 
243
  with gr.Row(equal_height=True):
244
+
245
  with gr.Column():
246
+
247
  images = gr.Gallery(
248
  label="Upload Images",
 
249
  type="filepath",
250
  columns=2,
251
  rows=1,
 
255
 
256
  prompt = gr.Text(
257
  label="Edit Prompt",
 
258
  max_lines=2,
259
+ placeholder="e.g., transform into anime, upscale, change lighting..."
260
+ )
261
+
262
+ negative_prompt = gr.Textbox(
263
+ label="Negative Prompt",
264
+ value=DEFAULT_NEGATIVE_PROMPT,
265
+ max_lines=3
266
  )
267
 
268
  run_button = gr.Button("Edit Image", variant="primary")
269
 
270
  with gr.Column():
271
+
272
+ output_image = gr.Image(
273
+ label="Output Image",
274
+ interactive=False,
275
+ format="png",
276
+ height=395
277
+ )
278
 
279
  with gr.Accordion("Advanced Settings", open=False, visible=False):
280
+
281
+ seed = gr.Slider(
282
+ label="Seed",
283
+ minimum=0,
284
+ maximum=MAX_SEED,
285
+ step=1,
286
+ value=0
287
+ )
288
+
289
+ randomize_seed = gr.Checkbox(
290
+ label="Randomize Seed",
291
+ value=True
292
+ )
293
+
294
+ guidance_scale = gr.Slider(
295
+ label="Guidance Scale",
296
+ minimum=1.0,
297
+ maximum=10.0,
298
+ step=0.1,
299
+ value=1.0
300
+ )
301
+
302
+ steps = gr.Slider(
303
+ label="Inference Steps",
304
+ minimum=1,
305
+ maximum=50,
306
+ step=1,
307
+ value=4
308
+ )
309
+
310
+ run_button.click(
311
+ fn=infer,
312
+ inputs=[images, prompt, negative_prompt, seed, randomize_seed, guidance_scale, steps],
313
+ outputs=[output_image, seed]
314
  )
 
 
315
 
 
 
 
 
 
316
 
317
  if __name__ == "__main__":
318
+ demo.queue(max_size=30).launch(
319
+ css=css,
320
+ theme=orange_red_theme,
321
+ mcp_server=True,
322
+ ssr_mode=False,
323
+ show_error=True
324
+ )