WarlordHermes committed on
Commit
06719c2
·
verified ·
1 Parent(s): e810215

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +464 -173
app.py CHANGED
@@ -10,34 +10,43 @@ from typing import Iterable
10
  from gradio.themes import Soft
11
  from gradio.themes.utils import colors, fonts, sizes
12
 
13
- colors.orange_red = colors.Color(
14
- name="orange_red",
15
- c50="#FFF0E5",
16
- c100="#FFE0CC",
17
- c200="#FFC299",
18
- c300="#FFA366",
19
- c400="#FF8533",
20
- c500="#FF4500",
21
- c600="#E63E00",
22
- c700="#CC3700",
23
- c800="#B33000",
24
- c900="#992900",
25
- c950="#802200",
 
 
 
 
26
  )
27
 
28
- class OrangeRedTheme(Soft):
 
29
  def __init__(
30
  self,
31
  *,
32
  primary_hue: colors.Color | str = colors.gray,
33
- secondary_hue: colors.Color | str = colors.orange_red,
34
  neutral_hue: colors.Color | str = colors.slate,
35
- text_size: sizes.Size | str = sizes.text_lg,
36
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
37
- fonts.GoogleFont("Outfit"), "Arial", "sans-serif",
 
 
38
  ),
39
  font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
40
- fonts.GoogleFont("IBM Plex Mono"), "ui-monospace", "monospace",
 
 
41
  ),
42
  ):
43
  super().__init__(
@@ -49,40 +58,53 @@ class OrangeRedTheme(Soft):
49
  font_mono=font_mono,
50
  )
51
  super().set(
52
- background_fill_primary="*primary_50",
53
- background_fill_primary_dark="*primary_900",
54
- body_background_fill="linear-gradient(135deg, *primary_200, *primary_100)",
55
- body_background_fill_dark="linear-gradient(135deg, *primary_900, *primary_800)",
 
 
 
 
 
 
 
 
 
 
56
  button_primary_text_color="white",
57
  button_primary_text_color_hover="white",
58
- button_primary_background_fill="linear-gradient(90deg, *secondary_500, *secondary_600)",
59
- button_primary_background_fill_hover="linear-gradient(90deg, *secondary_600, *secondary_700)",
60
- button_primary_background_fill_dark="linear-gradient(90deg, *secondary_600, *secondary_700)",
61
- button_primary_background_fill_hover_dark="linear-gradient(90deg, *secondary_500, *secondary_600)",
62
- button_secondary_text_color="black",
63
- button_secondary_text_color_hover="white",
64
- button_secondary_background_fill="linear-gradient(90deg, *primary_300, *primary_300)",
65
- button_secondary_background_fill_hover="linear-gradient(90deg, *primary_400, *primary_400)",
66
- button_secondary_background_fill_dark="linear-gradient(90deg, *primary_500, *primary_600)",
67
- button_secondary_background_fill_hover_dark="linear-gradient(90deg, *primary_500, *primary_500)",
 
 
68
  slider_color="*secondary_500",
69
- slider_color_dark="*secondary_600",
70
- block_title_text_weight="600",
71
- block_border_width="3px",
72
- block_shadow="*shadow_drop_lg",
73
- button_primary_shadow="*shadow_drop_lg",
74
- button_large_padding="11px",
75
- color_accent_soft="*primary_100",
76
- block_label_background_fill="*primary_200",
77
  )
78
 
79
- orange_red_theme = OrangeRedTheme()
80
 
81
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
82
 
83
- print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
84
- print("torch.__version__ =", torch.__version__)
85
- print("Using device:", device)
 
 
 
 
 
86
 
87
  from diffusers import FlowMatchEulerDiscreteScheduler
88
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
@@ -96,9 +118,9 @@ pipe = QwenImageEditPlusPipeline.from_pretrained(
96
  transformer=QwenImageTransformer2DModel.from_pretrained(
97
  "prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19",
98
  torch_dtype=dtype,
99
- device_map='cuda'
100
  ),
101
- torch_dtype=dtype
102
  ).to(device)
103
 
104
  try:
@@ -109,78 +131,95 @@ except Exception as e:
109
 
110
  MAX_SEED = np.iinfo(np.int32).max
111
 
112
- DEFAULT_NEGATIVE_PROMPT = "worst quality, low quality, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry"
 
 
 
 
113
 
 
 
 
114
 
115
  def update_dimensions_on_upload(image):
116
  if image is None:
117
  return 1024, 1024
118
-
119
- original_width, original_height = image.size
120
-
121
- if original_width > original_height:
122
- new_width = 1024
123
- aspect_ratio = original_height / original_width
124
- new_height = int(new_width * aspect_ratio)
125
  else:
126
- new_height = 1024
127
- aspect_ratio = original_width / original_height
128
- new_width = int(new_height * aspect_ratio)
129
-
130
- new_width = (new_width // 8) * 8
131
- new_height = (new_height // 8) * 8
132
-
133
- return new_width, new_height
134
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
 
136
  @spaces.GPU
137
  def infer(
138
- images,
139
- prompt,
140
- negative_prompt,
141
- seed,
142
- randomize_seed,
143
- guidance_scale,
144
- steps,
145
- progress=gr.Progress(track_tqdm=True)
146
  ):
147
  gc.collect()
148
  torch.cuda.empty_cache()
149
 
150
  if not images:
151
- raise gr.Error("Please upload at least one image to edit.")
 
 
152
 
153
  pil_images = []
154
- if images is not None:
155
- for item in images:
156
- try:
157
- if isinstance(item, tuple) or isinstance(item, list):
158
- path_or_img = item[0]
159
- else:
160
- path_or_img = item
161
-
162
- if isinstance(path_or_img, str):
163
- pil_images.append(Image.open(path_or_img).convert("RGB"))
164
- elif isinstance(path_or_img, Image.Image):
165
- pil_images.append(path_or_img.convert("RGB"))
166
- else:
167
- pil_images.append(Image.open(path_or_img.name).convert("RGB"))
168
- except Exception as e:
169
- print(f"Skipping invalid image item: {e}")
170
- continue
171
 
172
  if not pil_images:
173
- raise gr.Error("Could not process uploaded images.")
174
 
175
  if randomize_seed:
176
  seed = random.randint(0, MAX_SEED)
177
 
178
  generator = torch.Generator(device=device).manual_seed(seed)
179
-
180
  width, height = update_dimensions_on_upload(pil_images[0])
181
 
182
  try:
183
- result_image = pipe(
184
  image=pil_images,
185
  prompt=prompt,
186
  negative_prompt=negative_prompt,
@@ -190,9 +229,7 @@ def infer(
190
  generator=generator,
191
  true_cfg_scale=guidance_scale,
192
  ).images[0]
193
-
194
- return result_image, seed
195
-
196
  finally:
197
  gc.collect()
198
  torch.cuda.empty_cache()
@@ -202,123 +239,377 @@ def infer(
202
  def infer_example(images, prompt):
203
  if not images:
204
  return None, 0
 
 
205
 
206
- if isinstance(images, str):
207
- images_list = [images]
208
- else:
209
- images_list = images
210
-
211
- result, seed = infer(
212
- images=images_list,
213
- prompt=prompt,
214
- negative_prompt=DEFAULT_NEGATIVE_PROMPT,
215
- seed=0,
216
- randomize_seed=True,
217
- guidance_scale=1.0,
218
- steps=4
219
- )
220
- return result, seed
221
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
 
223
  css = """
 
224
  #col-container {
225
  margin: 0 auto;
226
- max-width: 1000px;
227
  }
228
- #main-title h1 {font-size: 2.4em !important;}
229
- """
230
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
231
 
232
- with gr.Blocks() as demo:
233
- with gr.Column(elem_id="col-container"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234
 
235
- gr.Markdown("# **FireRed-Image-Edit-1.0-Fast**", elem_id="main-title")
 
 
 
 
 
 
 
 
236
 
237
- gr.Markdown(
238
- "Perform image edits using "
239
- "[FireRed-Image-Edit-1.0](https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.0)"
240
- " with 4-step fast inference."
241
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
242
 
243
- with gr.Row(equal_height=True):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
- with gr.Column():
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
 
247
  images = gr.Gallery(
248
  label="Upload Images",
249
  type="filepath",
250
  columns=2,
251
  rows=1,
252
- height=300,
253
- allow_preview=True
 
254
  )
255
 
256
- prompt = gr.Text(
257
- label="Edit Prompt",
258
- max_lines=2,
259
- placeholder="e.g., transform into anime, upscale, change lighting..."
 
 
 
 
 
260
  )
261
 
262
- negative_prompt = gr.Textbox(
263
- label="Negative Prompt",
264
- value=DEFAULT_NEGATIVE_PROMPT,
265
- max_lines=3
266
  )
267
 
268
- run_button = gr.Button("Edit Image", variant="primary")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269
 
270
- with gr.Column():
 
 
271
 
272
  output_image = gr.Image(
273
- label="Output Image",
274
  interactive=False,
275
  format="png",
276
- height=395
 
277
  )
278
 
279
- with gr.Accordion("Advanced Settings", open=False, visible=False):
280
-
281
- seed = gr.Slider(
282
- label="Seed",
283
- minimum=0,
284
- maximum=MAX_SEED,
285
- step=1,
286
- value=0
287
- )
288
 
289
- randomize_seed = gr.Checkbox(
290
- label="Randomize Seed",
291
- value=True
292
- )
 
 
 
 
 
 
293
 
294
- guidance_scale = gr.Slider(
295
- label="Guidance Scale",
296
- minimum=1.0,
297
- maximum=10.0,
298
- step=0.1,
299
- value=1.0
300
- )
 
 
 
 
301
 
302
- steps = gr.Slider(
303
- label="Inference Steps",
304
- minimum=1,
305
- maximum=50,
306
- step=1,
307
- value=4
308
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309
 
310
- run_button.click(
311
- fn=infer,
312
- inputs=[images, prompt, negative_prompt, seed, randomize_seed, guidance_scale, steps],
313
- outputs=[output_image, seed]
314
- )
 
 
 
 
 
 
 
 
315
 
 
 
 
316
 
317
  if __name__ == "__main__":
318
  demo.queue(max_size=30).launch(
319
- css=css,
320
- theme=orange_red_theme,
321
  mcp_server=True,
322
  ssr_mode=False,
323
- show_error=True
324
  )
 
10
  from gradio.themes import Soft
11
  from gradio.themes.utils import colors, fonts, sizes
12
 
13
+ # ═══════════════════════════════════════════════════════════════════════
14
+ # THEME
15
+ # ═══════════════════════════════════════════════════════════════════════
16
+
17
+ colors.fire_red = colors.Color(
18
+ name="fire_red",
19
+ c50="#FFF5F0",
20
+ c100="#FFE8DB",
21
+ c200="#FFD0B5",
22
+ c300="#FFB088",
23
+ c400="#FF8C5A",
24
+ c500="#FF6B35",
25
+ c600="#E8531F",
26
+ c700="#CC4317",
27
+ c800="#A63812",
28
+ c900="#80300F",
29
+ c950="#5C220A",
30
  )
31
 
32
+
33
+ class FireRedTheme(Soft):
34
  def __init__(
35
  self,
36
  *,
37
  primary_hue: colors.Color | str = colors.gray,
38
+ secondary_hue: colors.Color | str = colors.fire_red,
39
  neutral_hue: colors.Color | str = colors.slate,
40
+ text_size: sizes.Size | str = sizes.text_md,
41
  font: fonts.Font | str | Iterable[fonts.Font | str] = (
42
+ fonts.GoogleFont("Inter"),
43
+ "system-ui",
44
+ "sans-serif",
45
  ),
46
  font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
47
+ fonts.GoogleFont("JetBrains Mono"),
48
+ "ui-monospace",
49
+ "monospace",
50
  ),
51
  ):
52
  super().__init__(
 
58
  font_mono=font_mono,
59
  )
60
  super().set(
61
+ body_background_fill="#f0f2f6",
62
+ body_background_fill_dark="*neutral_950",
63
+ background_fill_primary="white",
64
+ background_fill_primary_dark="*neutral_900",
65
+ block_background_fill="white",
66
+ block_background_fill_dark="*neutral_800",
67
+ block_border_width="1px",
68
+ block_border_color="*neutral_200",
69
+ block_border_color_dark="*neutral_700",
70
+ block_shadow="0 1px 4px rgba(0,0,0,0.05)",
71
+ block_shadow_dark="0 1px 4px rgba(0,0,0,0.25)",
72
+ block_title_text_weight="600",
73
+ block_label_background_fill="*neutral_50",
74
+ block_label_background_fill_dark="*neutral_800",
75
  button_primary_text_color="white",
76
  button_primary_text_color_hover="white",
77
+ button_primary_background_fill="linear-gradient(135deg, *secondary_500, *secondary_600)",
78
+ button_primary_background_fill_hover="linear-gradient(135deg, *secondary_600, *secondary_700)",
79
+ button_primary_background_fill_dark="linear-gradient(135deg, *secondary_500, *secondary_600)",
80
+ button_primary_background_fill_hover_dark="linear-gradient(135deg, *secondary_600, *secondary_700)",
81
+ button_primary_shadow="0 4px 14px rgba(232, 83, 31, 0.25)",
82
+ button_secondary_text_color="*secondary_700",
83
+ button_secondary_text_color_dark="*secondary_300",
84
+ button_secondary_background_fill="*secondary_50",
85
+ button_secondary_background_fill_hover="*secondary_100",
86
+ button_secondary_background_fill_dark="rgba(255, 107, 53, 0.1)",
87
+ button_secondary_background_fill_hover_dark="rgba(255, 107, 53, 0.2)",
88
+ button_large_padding="12px 24px",
89
  slider_color="*secondary_500",
90
+ slider_color_dark="*secondary_500",
91
+ input_border_color_focus="*secondary_400",
92
+ input_border_color_focus_dark="*secondary_500",
93
+ color_accent_soft="*secondary_50",
94
+ color_accent_soft_dark="rgba(255, 107, 53, 0.15)",
 
 
 
95
  )
96
 
 
97
 
98
+ theme = FireRedTheme()
99
 
100
+ # ═══════════════════════════════════════════════════════════════════════
101
+ # MODEL
102
+ # ═══════════════════════════════════════════════════════════════════════
103
+
104
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
105
+ print("CUDA_VISIBLE_DEVICES =", os.environ.get("CUDA_VISIBLE_DEVICES"))
106
+ print("torch.__version__ =", torch.__version__)
107
+ print("device =", device)
108
 
109
  from diffusers import FlowMatchEulerDiscreteScheduler
110
  from qwenimage.pipeline_qwenimage_edit_plus import QwenImageEditPlusPipeline
 
118
  transformer=QwenImageTransformer2DModel.from_pretrained(
119
  "prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19",
120
  torch_dtype=dtype,
121
+ device_map="cuda",
122
  ),
123
+ torch_dtype=dtype,
124
  ).to(device)
125
 
126
  try:
 
131
 
132
  MAX_SEED = np.iinfo(np.int32).max
133
 
134
+ DEFAULT_NEGATIVE_PROMPT = (
135
+ "worst quality, low quality, bad anatomy, bad hands, text, error, "
136
+ "missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, "
137
+ "signature, watermark, username, blurry"
138
+ )
139
 
140
+ # ═══════════════════════════════════════════════════════════════════════
141
+ # HELPERS
142
+ # ═══════════════════════════════════════════════════════════════════════
143
 
144
  def update_dimensions_on_upload(image):
145
  if image is None:
146
  return 1024, 1024
147
+ w, h = image.size
148
+ if w > h:
149
+ nw, nh = 1024, int(1024 * h / w)
 
 
 
 
150
  else:
151
+ nh, nw = 1024, int(1024 * w / h)
152
+ return (nw // 8) * 8, (nh // 8) * 8
153
+
154
+
155
+ def format_seed(seed_val):
156
+ return f"{int(seed_val)}"
157
+
158
+
159
+ def format_info(seed_val, images):
160
+ if images:
161
+ try:
162
+ first = images[0]
163
+ path = first[0] if isinstance(first, (tuple, list)) else first
164
+ if isinstance(path, str):
165
+ im = Image.open(path)
166
+ elif isinstance(path, Image.Image):
167
+ im = path
168
+ else:
169
+ im = Image.open(path.name)
170
+ ow, oh = im.size
171
+ nw, nh = update_dimensions_on_upload(im)
172
+ return (
173
+ f"**Seed:** `{int(seed_val)}`\n\n"
174
+ f"**Original:** {ow}×{oh} → **Output:** {nw}×{nh}"
175
+ )
176
+ except Exception:
177
+ pass
178
+ return f"**Seed:** `{int(seed_val)}`"
179
+
180
+
181
+ # ═══════════════════════════════════════════════════════════════════════
182
+ # INFERENCE
183
+ # ═══════════════════════════════════════════════════════════════════════
184
 
185
  @spaces.GPU
186
  def infer(
187
+ images, prompt, negative_prompt,
188
+ seed, randomize_seed, guidance_scale, steps,
189
+ progress=gr.Progress(track_tqdm=True),
 
 
 
 
 
190
  ):
191
  gc.collect()
192
  torch.cuda.empty_cache()
193
 
194
  if not images:
195
+ raise gr.Error("⚠️ Please upload at least one image.")
196
+ if not prompt or not prompt.strip():
197
+ raise gr.Error("⚠️ Please enter an edit prompt.")
198
 
199
  pil_images = []
200
+ for item in images:
201
+ try:
202
+ path = item[0] if isinstance(item, (tuple, list)) else item
203
+ if isinstance(path, str):
204
+ pil_images.append(Image.open(path).convert("RGB"))
205
+ elif isinstance(path, Image.Image):
206
+ pil_images.append(path.convert("RGB"))
207
+ else:
208
+ pil_images.append(Image.open(path.name).convert("RGB"))
209
+ except Exception as e:
210
+ print(f"Skipping invalid image: {e}")
 
 
 
 
 
 
211
 
212
  if not pil_images:
213
+ raise gr.Error("⚠️ Could not process uploaded images.")
214
 
215
  if randomize_seed:
216
  seed = random.randint(0, MAX_SEED)
217
 
218
  generator = torch.Generator(device=device).manual_seed(seed)
 
219
  width, height = update_dimensions_on_upload(pil_images[0])
220
 
221
  try:
222
+ result = pipe(
223
  image=pil_images,
224
  prompt=prompt,
225
  negative_prompt=negative_prompt,
 
229
  generator=generator,
230
  true_cfg_scale=guidance_scale,
231
  ).images[0]
232
+ return result, seed
 
 
233
  finally:
234
  gc.collect()
235
  torch.cuda.empty_cache()
 
239
  def infer_example(images, prompt):
240
  if not images:
241
  return None, 0
242
+ images_list = [images] if isinstance(images, str) else images
243
+ return infer(images_list, prompt, DEFAULT_NEGATIVE_PROMPT, 0, True, 1.0, 4)
244
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
245
 
246
+ # ═══════════════════════════════════════════════════════════════════════
247
+ # PROMPT SUGGESTIONS
248
+ # ═══════════════════════════════════════════════════════════════════════
249
+
250
+ SUGGESTIONS = [
251
+ "Transform into anime style",
252
+ "Convert to oil painting",
253
+ "Add dramatic sunset lighting",
254
+ "Make it a pencil sketch",
255
+ "Apply cyberpunk neon aesthetic",
256
+ "Add snow and winter vibes",
257
+ "Turn into watercolor art",
258
+ "Make it look vintage 1970s",
259
+ ]
260
+
261
+ # ═══════════════════════════════════════════════════════════════════════
262
+ # CSS
263
+ # ═══════════════════════════════════════════════════════════════════════
264
 
265
  css = """
266
+ /* ── Container ─────────────────────────────────────────────── */
267
  #col-container {
268
  margin: 0 auto;
269
+ max-width: 1120px;
270
  }
 
 
271
 
272
+ /* ── Header ──────────────────────────────────────────────── */
273
+ .hdr {
274
+ text-align: center;
275
+ padding: 38px 28px 30px;
276
+ background: linear-gradient(135deg, #1a1a2e 0%, #16213e 50%, #0f3460 100%);
277
+ border-radius: 20px;
278
+ margin-bottom: 22px;
279
+ border: 1px solid rgba(255,107,53,.15);
280
+ box-shadow: 0 12px 44px rgba(0,0,0,.10);
281
+ position: relative;
282
+ overflow: hidden;
283
+ }
284
+ .hdr::before {
285
+ content: "";
286
+ position: absolute; inset: 0;
287
+ background:
288
+ radial-gradient(ellipse at 25% 50%, rgba(255,107,53,.07) 0%, transparent 60%),
289
+ radial-gradient(ellipse at 80% 25%, rgba(255,140,90,.05) 0%, transparent 50%);
290
+ pointer-events: none;
291
+ }
292
+ .hdr > * { position: relative; z-index: 1; }
293
+ .hdr h1 {
294
+ font-size: 2.6em; font-weight: 800;
295
+ background: linear-gradient(135deg, #FF8C5A, #FF6B35, #FF4500);
296
+ -webkit-background-clip: text; -webkit-text-fill-color: transparent;
297
+ background-clip: text;
298
+ margin: 0 0 8px; letter-spacing: -.02em; line-height: 1.15;
299
+ }
300
+ .hdr .sub {
301
+ color: #94a3b8; font-size: 1.05em; margin: 0 0 16px; line-height: 1.55;
302
+ }
303
+ .hdr .sub a {
304
+ color: #FF8C5A; text-decoration: none;
305
+ border-bottom: 1px solid rgba(255,140,90,.3);
306
+ transition: border-color .2s;
307
+ }
308
+ .hdr .sub a:hover { border-bottom-color: #FF8C5A; }
309
+ .badges { display: flex; justify-content: center; gap: 8px; flex-wrap: wrap; }
310
+ .bdg {
311
+ background: rgba(255,107,53,.12); color: #FFB088;
312
+ padding: 5px 14px; border-radius: 100px;
313
+ font-size: .82em; font-weight: 500;
314
+ border: 1px solid rgba(255,107,53,.18);
315
+ }
316
 
317
+ /* ── Section Label ─────────────────────────────────────────── */
318
+ .stl {
319
+ font-size: .92em; font-weight: 700; color: #475569;
320
+ margin: 0 0 6px; display: flex; align-items: center; gap: 6px;
321
+ }
322
+ .dark .stl { color: #cbd5e1; }
323
+
324
+ /* ── Generate Button ───────────────────────────────────────── */
325
+ #gen-btn {
326
+ margin-top: 14px !important;
327
+ font-size: 1.1em !important; font-weight: 700 !important;
328
+ padding: 14px 28px !important; border-radius: 14px !important;
329
+ letter-spacing: .3px;
330
+ transition: all .25s cubic-bezier(.4,0,.2,1) !important;
331
+ min-height: 52px !important;
332
+ }
333
+ #gen-btn:hover {
334
+ transform: translateY(-2px) !important;
335
+ box-shadow: 0 8px 28px rgba(232,83,31,.40) !important;
336
+ }
337
+ #gen-btn:active { transform: translateY(0) !important; }
338
+
339
+ /* ── Clear Button ──────────────────────────────────────────── */
340
+ #clear-btn {
341
+ min-height: 52px !important;
342
+ margin-top: 14px !important;
343
+ border-radius: 14px !important;
344
+ font-weight: 600 !important;
345
+ }
346
 
347
+ /* ── Prompt Chip Row ───────────────────────────────────────── */
348
+ .chip-row { gap: 6px !important; margin-top: 2px !important; }
349
+ .chip-btn {
350
+ font-size: .78em !important; padding: 5px 13px !important;
351
+ border-radius: 100px !important; min-width: 0 !important;
352
+ font-weight: 500 !important; white-space: nowrap !important;
353
+ transition: all .2s ease !important;
354
+ }
355
+ .chip-btn:hover { transform: translateY(-1px) !important; }
356
 
357
+ /* ── Output Image ──────────────────────────────────────────── */
358
+ #output-img { border-radius: 14px !important; overflow: hidden; }
359
+
360
+ /* ── Info Box ──────────────────────────────────────────────── */
361
+ #info-box {
362
+ margin-top: 6px !important;
363
+ border-radius: 12px !important;
364
+ }
365
+ #info-box .prose {
366
+ font-family: 'JetBrains Mono', monospace;
367
+ font-size: .88em;
368
+ }
369
+
370
+ /* ── Tips ──────────────────────────────────────────────────── */
371
+ .tips {
372
+ background: linear-gradient(135deg, #FFF5F0, #FFE8DB);
373
+ border: 1px solid #FFD0B5; border-radius: 14px;
374
+ padding: 18px 24px; margin-top: 14px;
375
+ }
376
+ .tips h4 { margin: 0 0 10px; font-size: .95em; color: #A63812; }
377
+ .tips ul {
378
+ margin: 0; padding: 0 0 0 20px;
379
+ color: #80300F; font-size: .85em; line-height: 1.75;
380
+ }
381
+ .tips li { margin-bottom: 2px; }
382
+ .tips li::marker { color: #FF6B35; }
383
+ .tips strong { color: #A63812; }
384
 
385
+ .dark .tips {
386
+ background: linear-gradient(135deg, #2a1a10, #201510);
387
+ border-color: rgba(255,107,53,.2);
388
+ }
389
+ .dark .tips h4 { color: #FFB088; }
390
+ .dark .tips ul { color: #FFD0B5; }
391
+ .dark .tips strong { color: #FFB088; }
392
+
393
+ /* ── Footer ────────────────────────────────────────────────── */
394
+ .ftr {
395
+ text-align: center; padding: 18px; margin-top: 20px;
396
+ color: #94a3b8; font-size: .82em;
397
+ border-top: 1px solid #e2e8f0;
398
+ }
399
+ .dark .ftr { border-top-color: rgba(255,255,255,.08); }
400
+ .ftr a { color: #E8531F; text-decoration: none; font-weight: 500; }
401
+ .ftr a:hover { text-decoration: underline; }
402
+
403
+ /* ── Responsive ────────────────────────────────────────────── */
404
+ @media (max-width: 768px) {
405
+ .hdr h1 { font-size: 1.8em; }
406
+ .hdr { padding: 24px 16px 22px; }
407
+ .bdg { font-size: .72em; padding: 4px 10px; }
408
+ .chip-btn { font-size: .72em !important; padding: 4px 10px !important; }
409
+ }
410
+ """
411
 
412
+ # ═══════════════════════════════════════════════════════════════════════
413
+ # UI
414
+ # ═══════════════════════════════════════════════════════════════════════
415
+
416
+ with gr.Blocks(css=css, theme=theme, title="🔥 FireRed Image Edit") as demo:
417
+ with gr.Column(elem_id="col-container"):
418
+
419
+ # ── Header ──────────────────────────────────────────────────
420
+ gr.HTML("""
421
+ <div class="hdr">
422
+ <h1>🔥 FireRed Image Edit</h1>
423
+ <p class="sub">
424
+ AI-powered image editing with blazing-fast <strong>4-step inference</strong><br>
425
+ Powered by
426
+ <a href="https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.1"
427
+ target="_blank">FireRed-Image-Edit-1.1</a>
428
+ &amp;
429
+ <a href="https://huggingface.co/prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19"
430
+ target="_blank">Rapid-AIO-V19</a>
431
+ </p>
432
+ <div class="badges">
433
+ <span class="bdg">⚡ 4-Step Fast</span>
434
+ <span class="bdg">🎨 Style Transfer</span>
435
+ <span class="bdg">📐 Auto Resize</span>
436
+ <span class="bdg">🖼️ Multi-Image</span>
437
+ <span class="bdg">🔧 BF16 Precision</span>
438
+ </div>
439
+ </div>
440
+ """)
441
+
442
+ # ── Main two-column layout ─────────────────────────────────
443
+ with gr.Row(equal_height=False):
444
+
445
+ # ─── Left: inputs ───────────────────────────────────────
446
+ with gr.Column(scale=1):
447
+ gr.HTML('<p class="stl">📤&nbsp; Upload Image(s)</p>')
448
 
449
  images = gr.Gallery(
450
  label="Upload Images",
451
  type="filepath",
452
  columns=2,
453
  rows=1,
454
+ height=280,
455
+ allow_preview=True,
456
+ object_fit="contain",
457
  )
458
 
459
+ gr.HTML('<p class="stl" style="margin-top:16px">✏️&nbsp; Describe Your Edit</p>')
460
+
461
+ prompt = gr.Textbox(
462
+ show_label=False,
463
+ max_lines=3,
464
+ placeholder=(
465
+ "e.g. 'Transform into a Studio Ghibli anime scene "
466
+ "with warm golden-hour lighting'"
467
+ ),
468
  )
469
 
470
+ # Suggestion chips
471
+ gr.HTML(
472
+ '<p style="font-size:.78em;color:#94a3b8;margin:10px 0 4px;">'
473
+ "💡 Quick suggestions — click to fill prompt:</p>"
474
  )
475
 
476
+ chip_data_1, chip_data_2 = [], []
477
+ with gr.Row(elem_classes="chip-row"):
478
+ for t in SUGGESTIONS[:4]:
479
+ b = gr.Button(t, size="sm", variant="secondary",
480
+ elem_classes="chip-btn")
481
+ chip_data_1.append((b, t))
482
+
483
+ with gr.Row(elem_classes="chip-row"):
484
+ for t in SUGGESTIONS[4:]:
485
+ b = gr.Button(t, size="sm", variant="secondary",
486
+ elem_classes="chip-btn")
487
+ chip_data_2.append((b, t))
488
+
489
+ with gr.Row():
490
+ run_button = gr.Button(
491
+ "🎨 Generate Edit",
492
+ variant="primary", elem_id="gen-btn", size="lg", scale=3,
493
+ )
494
+ clear_button = gr.Button(
495
+ "🗑️ Clear",
496
+ variant="secondary", elem_id="clear-btn", size="lg", scale=1,
497
+ )
498
 
499
+ # ─── Right: output ──────────────────────────────────────
500
+ with gr.Column(scale=1):
501
+ gr.HTML('<p class="stl">🖼️&nbsp; Result</p>')
502
 
503
  output_image = gr.Image(
504
+ show_label=False,
505
  interactive=False,
506
  format="png",
507
+ height=420,
508
+ elem_id="output-img",
509
  )
510
 
511
+ info_box = gr.Markdown(
512
+ value="*Generate an edit to see details here.*",
513
+ elem_id="info-box",
514
+ )
 
 
 
 
 
515
 
516
+ # ── Advanced settings ───────────────────────────────────────
517
+ with gr.Accordion("⚙️ Advanced Settings", open=False):
518
+ with gr.Row():
519
+ seed = gr.Slider(
520
+ label="Seed", minimum=0, maximum=MAX_SEED, step=1,
521
+ value=0, scale=3,
522
+ )
523
+ randomize_seed = gr.Checkbox(
524
+ label="🎲 Randomize seed", value=True, scale=1,
525
+ )
526
 
527
+ with gr.Row():
528
+ guidance_scale = gr.Slider(
529
+ label="Guidance Scale",
530
+ minimum=1.0, maximum=10.0, step=0.1, value=1.0,
531
+ info="Higher → stronger prompt adherence",
532
+ )
533
+ steps = gr.Slider(
534
+ label="Inference Steps",
535
+ minimum=1, maximum=50, step=1, value=4,
536
+ info="More steps → higher quality (slower)",
537
+ )
538
 
539
+ negative_prompt = gr.Textbox(
540
+ label="Negative Prompt",
541
+ value=DEFAULT_NEGATIVE_PROMPT,
542
+ max_lines=3,
543
+ info="Describe what to avoid in the output",
544
+ )
545
+
546
+ # ── Tips ────────────────────────────────────────────────────
547
+ gr.HTML("""
548
+ <div class="tips">
549
+ <h4>💡 Tips for Best Results</h4>
550
+ <ul>
551
+ <li><strong>Be specific</strong> — clearly describe
552
+ the change you want</li>
553
+ <li><strong>Style keywords</strong> — "anime", "oil painting",
554
+ "watercolor", "pixel art", "3D render"</li>
555
+ <li><strong>Lighting</strong> — "golden hour", "dramatic shadows",
556
+ "soft diffused light", "neon glow"</li>
557
+ <li><strong>Higher quality</strong> — increase steps to 8-12
558
+ for finer details (takes longer)</li>
559
+ <li><strong>Multiple images</strong> — upload extra reference
560
+ images for richer context</li>
561
+ </ul>
562
+ </div>
563
+ """)
564
+
565
+ # ── Footer ──────────────────────────────────────────────────
566
+ gr.HTML("""
567
+ <div class="ftr">
568
+ Model&nbsp;
569
+ <a href="https://huggingface.co/FireRedTeam/FireRed-Image-Edit-1.1"
570
+ target="_blank">FireRed-Image-Edit-1.1</a>
571
+ &nbsp;·&nbsp; Accelerated&nbsp;
572
+ <a href="https://huggingface.co/prithivMLmods/Qwen-Image-Edit-Rapid-AIO-V19"
573
+ target="_blank">Rapid-AIO-V19</a>
574
+ </div>
575
+ """)
576
+
577
+ # ═══════════════════════════════════════════════════════════════
578
+ # EVENT WIRING
579
+ # ═══════════════════════════════════════════════════════════════
580
+
581
+ # Suggestion chips β†’ fill prompt
582
+ for btn, text in chip_data_1 + chip_data_2:
583
+ btn.click(fn=lambda t=text: t, inputs=[], outputs=[prompt])
584
+
585
+ # Clear button
586
+ clear_button.click(
587
+ fn=lambda: (None, "", None, "*Generate an edit to see details here.*"),
588
+ inputs=[],
589
+ outputs=[images, prompt, output_image, info_box],
590
+ )
591
 
592
+ # Generate
593
+ run_button.click(
594
+ fn=infer,
595
+ inputs=[
596
+ images, prompt, negative_prompt,
597
+ seed, randomize_seed, guidance_scale, steps,
598
+ ],
599
+ outputs=[output_image, seed],
600
+ ).then(
601
+ fn=format_info,
602
+ inputs=[seed, images],
603
+ outputs=[info_box],
604
+ )
605
 
606
+ # ═══════════════════════════════════════════════════════════════════════
607
+ # LAUNCH
608
+ # ═══════════════════════════════════════════════════════════════════════
609
 
610
  if __name__ == "__main__":
611
  demo.queue(max_size=30).launch(
 
 
612
  mcp_server=True,
613
  ssr_mode=False,
614
+ show_error=True,
615
  )